From: Matthew Brost <matthew.brost@intel.com>
To: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
Cc: intel-xe@lists.freedesktop.org,
"Christian König" <christian.koenig@amd.com>,
"Somalapuram Amaranath" <Amaranath.Somalapuram@amd.com>,
dri-devel@lists.freedesktop.org
Subject: Re: [PATCH v3 01/21] drm/ttm: Allow TTM LRU list nodes of different types
Date: Tue, 21 May 2024 13:12:33 +0000 [thread overview]
Message-ID: <ZkydweaFUX/2kEw3@DUT025-TGLU.fm.intel.com> (raw)
In-Reply-To: <20240521071639.77614-2-thomas.hellstrom@linux.intel.com>
On Tue, May 21, 2024 at 09:16:19AM +0200, Thomas Hellström wrote:
> To be able to handle list unlocking while traversing the LRU
> list, we want the iterators not only to point to the next
> position of the list traversal, but to insert themselves as
> list nodes at that point to work around the fact that the
> next node might otherwise disappear from the list while
> the iterator is pointing to it.
>
> These list nodes need to be easily distinguishable from other
> list nodes so that others traversing the list can skip
> over them.
>
> So declare a struct ttm_lru_item, with a struct list_head member
> and a type enum. This will slightly increase the size of a
> struct ttm_resource.
>
> Changes in previous series:
> - Update enum ttm_lru_item_type documentation.
> v3:
> - Introduce ttm_lru_first_res_or_null()
> (Christian König, Thomas Hellström)
>
> Cc: Christian König <christian.koenig@amd.com>
> Cc: Somalapuram Amaranath <Amaranath.Somalapuram@amd.com>
> Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> Cc: <dri-devel@lists.freedesktop.org>
> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> ---
> drivers/gpu/drm/ttm/ttm_device.c | 4 +-
> drivers/gpu/drm/ttm/ttm_resource.c | 89 +++++++++++++++++++++++-------
> include/drm/ttm/ttm_resource.h | 54 +++++++++++++++++-
> 3 files changed, 125 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
> index 434cf0258000..09411978a13a 100644
> --- a/drivers/gpu/drm/ttm/ttm_device.c
> +++ b/drivers/gpu/drm/ttm/ttm_device.c
> @@ -274,14 +274,14 @@ static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
> struct ttm_resource *res;
>
> spin_lock(&bdev->lru_lock);
> - while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
> + while ((res = ttm_lru_first_res_or_null(list))) {
> struct ttm_buffer_object *bo = res->bo;
>
> /* Take ref against racing releases once lru_lock is unlocked */
> if (!ttm_bo_get_unless_zero(bo))
> continue;
>
> - list_del_init(&res->lru);
> + list_del_init(&bo->resource->lru.link);
> spin_unlock(&bdev->lru_lock);
>
> if (bo->ttm)
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 4a66b851b67d..db9a7a3717c4 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -70,8 +70,8 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
> dma_resv_assert_held(pos->last->bo->base.resv);
>
> man = ttm_manager_type(pos->first->bo->bdev, i);
> - list_bulk_move_tail(&man->lru[j], &pos->first->lru,
> - &pos->last->lru);
> + list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
> + &pos->last->lru.link);
> }
> }
> }
> @@ -84,14 +84,38 @@ ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
> return &bulk->pos[res->mem_type][res->bo->priority];
> }
>
> +/* Return the previous resource on the list (skip over non-resource list items) */
> +static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
> +{
> + struct ttm_lru_item *lru = &cur->lru;
> +
> + do {
> + lru = list_prev_entry(lru, link);
> + } while (!ttm_lru_item_is_res(lru));
> +
> + return ttm_lru_item_to_res(lru);
> +}
> +
> +/* Return the next resource on the list (skip over non-resource list items) */
> +static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
> +{
> + struct ttm_lru_item *lru = &cur->lru;
> +
> + do {
> + lru = list_next_entry(lru, link);
> + } while (!ttm_lru_item_is_res(lru));
> +
> + return ttm_lru_item_to_res(lru);
> +}
> +
> /* Move the resource to the tail of the bulk move range */
> static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
> struct ttm_resource *res)
> {
> if (pos->last != res) {
> if (pos->first == res)
> - pos->first = list_next_entry(res, lru);
> - list_move(&res->lru, &pos->last->lru);
> + pos->first = ttm_lru_next_res(res);
> + list_move(&res->lru.link, &pos->last->lru.link);
> pos->last = res;
> }
> }
> @@ -122,11 +146,11 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
> pos->first = NULL;
> pos->last = NULL;
> } else if (pos->first == res) {
> - pos->first = list_next_entry(res, lru);
> + pos->first = ttm_lru_next_res(res);
> } else if (pos->last == res) {
> - pos->last = list_prev_entry(res, lru);
> + pos->last = ttm_lru_prev_res(res);
> } else {
> - list_move(&res->lru, &pos->last->lru);
> + list_move(&res->lru.link, &pos->last->lru.link);
> }
> }
>
> @@ -155,7 +179,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
> lockdep_assert_held(&bo->bdev->lru_lock);
>
> if (bo->pin_count) {
> - list_move_tail(&res->lru, &bdev->pinned);
> + list_move_tail(&res->lru.link, &bdev->pinned);
>
> } else if (bo->bulk_move) {
> struct ttm_lru_bulk_move_pos *pos =
> @@ -166,7 +190,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
> struct ttm_resource_manager *man;
>
> man = ttm_manager_type(bdev, res->mem_type);
> - list_move_tail(&res->lru, &man->lru[bo->priority]);
> + list_move_tail(&res->lru.link, &man->lru[bo->priority]);
> }
> }
>
> @@ -197,9 +221,9 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
> man = ttm_manager_type(bo->bdev, place->mem_type);
> spin_lock(&bo->bdev->lru_lock);
> if (bo->pin_count)
> - list_add_tail(&res->lru, &bo->bdev->pinned);
> + list_add_tail(&res->lru.link, &bo->bdev->pinned);
> else
> - list_add_tail(&res->lru, &man->lru[bo->priority]);
> + list_add_tail(&res->lru.link, &man->lru[bo->priority]);
> man->usage += res->size;
> spin_unlock(&bo->bdev->lru_lock);
> }
> @@ -221,7 +245,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
> struct ttm_device *bdev = man->bdev;
>
> spin_lock(&bdev->lru_lock);
> - list_del_init(&res->lru);
> + list_del_init(&res->lru.link);
> man->usage -= res->size;
> spin_unlock(&bdev->lru_lock);
> }
> @@ -472,14 +496,16 @@ struct ttm_resource *
> ttm_resource_manager_first(struct ttm_resource_manager *man,
> struct ttm_resource_cursor *cursor)
> {
> - struct ttm_resource *res;
> + struct ttm_lru_item *lru;
>
> lockdep_assert_held(&man->bdev->lru_lock);
>
> for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
> ++cursor->priority)
> - list_for_each_entry(res, &man->lru[cursor->priority], lru)
> - return res;
> + list_for_each_entry(lru, &man->lru[cursor->priority], link) {
> + if (ttm_lru_item_is_res(lru))
> + return ttm_lru_item_to_res(lru);
> + }
>
> return NULL;
> }
> @@ -498,15 +524,40 @@ ttm_resource_manager_next(struct ttm_resource_manager *man,
> struct ttm_resource_cursor *cursor,
> struct ttm_resource *res)
> {
> + struct ttm_lru_item *lru = &res->lru;
> +
> lockdep_assert_held(&man->bdev->lru_lock);
>
> - list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
> - return res;
> + list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
> + if (ttm_lru_item_is_res(lru))
> + return ttm_lru_item_to_res(lru);
> + }
>
> for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
> ++cursor->priority)
> - list_for_each_entry(res, &man->lru[cursor->priority], lru)
> - return res;
> + list_for_each_entry(lru, &man->lru[cursor->priority], link) {
> + if (ttm_lru_item_is_res(lru))
> +			return ttm_lru_item_to_res(lru);
> + }
> +
> + return NULL;
> +}
> +
> +/**
> + * ttm_lru_first_res_or_null() - Return the first resource on an lru list
> + * @head: The list head of the lru list.
> + *
> + * Return: Pointer to the first resource on the lru list or NULL if
> + * there is none.
> + */
> +struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
> +{
> + struct ttm_lru_item *lru;
> +
> + list_for_each_entry(lru, head, link) {
> + if (ttm_lru_item_is_res(lru))
> + return ttm_lru_item_to_res(lru);
> + }
>
> return NULL;
> }
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index 69769355139f..1511d91e290d 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -49,6 +49,43 @@ struct io_mapping;
> struct sg_table;
> struct scatterlist;
>
> +/**
> + * enum ttm_lru_item_type - enumerate ttm_lru_item subclasses
> + */
> +enum ttm_lru_item_type {
> + /** @TTM_LRU_RESOURCE: The resource subclass */
> + TTM_LRU_RESOURCE,
> + /** @TTM_LRU_HITCH: The iterator hitch subclass */
> + TTM_LRU_HITCH
> +};
> +
> +/**
> + * struct ttm_lru_item - The TTM lru list node base class
> + * @link: The list link
> + * @type: The subclass type
> + */
> +struct ttm_lru_item {
> + struct list_head link;
> + enum ttm_lru_item_type type;
> +};
> +
> +/**
> + * ttm_lru_item_init() - initialize a struct ttm_lru_item
> + * @item: The item to initialize
> + * @type: The subclass type
> + */
> +static inline void ttm_lru_item_init(struct ttm_lru_item *item,
> + enum ttm_lru_item_type type)
> +{
> + item->type = type;
> + INIT_LIST_HEAD(&item->link);
> +}
> +
> +static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item)
> +{
> + return item->type == TTM_LRU_RESOURCE;
> +}
> +
> struct ttm_resource_manager_func {
> /**
> * struct ttm_resource_manager_func member alloc
> @@ -217,9 +254,21 @@ struct ttm_resource {
> /**
> * @lru: Least recently used list, see &ttm_resource_manager.lru
> */
> - struct list_head lru;
> + struct ttm_lru_item lru;
> };
>
> +/**
> + * ttm_lru_item_to_res() - Downcast a struct ttm_lru_item to a struct ttm_resource
> + * @item: The struct ttm_lru_item to downcast
> + *
> + * Return: Pointer to the embedding struct ttm_resource
> + */
> +static inline struct ttm_resource *
> +ttm_lru_item_to_res(struct ttm_lru_item *item)
> +{
> + return container_of(item, struct ttm_resource, lru);
> +}
> +
> /**
> * struct ttm_resource_cursor
> *
> @@ -393,6 +442,9 @@ ttm_resource_manager_next(struct ttm_resource_manager *man,
> struct ttm_resource_cursor *cursor,
> struct ttm_resource *res);
>
> +struct ttm_resource *
> +ttm_lru_first_res_or_null(struct list_head *head);
> +
> /**
> * ttm_resource_manager_for_each_res - iterate over all resources
> * @man: the resource manager
> --
> 2.44.0
>
next prev parent reply other threads:[~2024-05-21 13:13 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-05-21 7:16 [PATCH v3 00/21] TTM shrinker helpers and xe buffer object shrinker Thomas Hellström
2024-05-21 7:16 ` [PATCH v3 01/21] drm/ttm: Allow TTM LRU list nodes of different types Thomas Hellström
2024-05-21 13:12 ` Matthew Brost [this message]
2024-05-28 9:16 ` Christian König
2024-05-21 7:16 ` [PATCH v3 02/21] drm/ttm: Slightly clean up LRU list iteration Thomas Hellström
2024-05-21 15:39 ` Matthew Brost
2024-05-28 9:19 ` Christian König
2024-05-21 7:16 ` [PATCH v3 03/21] drm/ttm: Use LRU hitches Thomas Hellström
2024-05-21 16:09 ` Matthew Brost
2024-05-21 7:16 ` [PATCH v3 04/21] drm/ttm, drm/amdgpu, drm/xe: Consider hitch moves within bulk sublist moves Thomas Hellström
2024-05-21 7:16 ` [PATCH v3 05/21] drm/ttm: Provide a generic LRU walker helper Thomas Hellström
2024-05-21 7:16 ` [PATCH v3 06/21] drm/ttm: Use the LRU walker helper for swapping Thomas Hellström
2024-05-21 7:16 ` [PATCH v3 07/21] drm/ttm: Use the LRU walker for eviction Thomas Hellström
2024-05-21 7:16 ` [PATCH v3 08/21] drm/ttm: Add a virtual base class for graphics memory backup Thomas Hellström
2024-05-21 7:16 ` [PATCH v3 09/21] drm/ttm/pool: Provide a helper to shrink pages Thomas Hellström
2024-05-21 7:16 ` [PATCH v3 10/21] drm/ttm: Use fault-injection to test error paths Thomas Hellström
2024-05-21 7:16 ` [PATCH v3 11/21] drm/ttm, drm/xe: Add a shrinker for xe bos Thomas Hellström
2024-05-21 7:16 ` [RFC PATCH v3 12/21] dma-buf/dma-resv: Introduce dma_resv_trylock_ctx() Thomas Hellström
2024-05-21 7:16 ` [RFC PATCH v3 13/21] drm/exec: Rework contended locking Thomas Hellström
2024-05-22 5:52 ` Christian König
2024-05-22 14:32 ` Thomas Hellström
2024-05-22 16:52 ` Christian König
2024-05-22 17:42 ` Thomas Hellström
2024-05-28 6:36 ` Thomas Hellström
2024-05-28 6:51 ` Christian König
2024-05-28 8:07 ` Thomas Hellström
2024-05-28 11:03 ` Christian König
2024-05-29 7:18 ` Thomas Hellström
2024-05-21 7:16 ` [RFC PATCH v3 14/21] drm/exec: Introduce a drm_exec_trylock_obj() function Thomas Hellström
2024-05-21 7:16 ` [RFC PATCH v3 15/21] drm/exec: Add a snapshot capability Thomas Hellström
2024-05-22 11:27 ` Christian König
2024-05-22 13:54 ` Thomas Hellström
2024-05-22 14:41 ` Thomas Hellström
2024-05-21 7:16 ` [RFC PATCH v3 16/21] drm/exec: Introduce an evict mode Thomas Hellström
2024-05-22 13:28 ` Christian König
2024-05-22 13:44 ` Thomas Hellström
2024-05-21 7:16 ` [RFC PATCH v3 17/21] drm/ttm: Support drm_exec locking for eviction and swapping Thomas Hellström
2024-05-21 7:16 ` [RFC PATCH v3 18/21] drm/ttm: Convert ttm vm to using drm_exec Thomas Hellström
2024-05-21 7:16 ` [RFC PATCH v3 19/21] drm/xe: Use drm_exec for fault locking Thomas Hellström
2024-05-21 7:16 ` [RFC PATCH v3 20/21] drm/ttm: Use drm_exec_trylock for bo initialization Thomas Hellström
2024-05-21 7:16 ` [RFC PATCH v3 21/21] drm/xe: Initial support for drm exec locking during validate Thomas Hellström
2024-05-21 7:22 ` [PATCH v3 00/21] TTM shrinker helpers and xe buffer object shrinker Thomas Hellström
2024-05-21 7:23 ` ✓ CI.Patch_applied: success for TTM shrinker helpers and xe buffer object shrinker (rev3) Patchwork
2024-05-21 7:24 ` ✗ CI.checkpatch: warning " Patchwork
2024-05-21 7:25 ` ✓ CI.KUnit: success " Patchwork
2024-05-21 7:37 ` ✓ CI.Build: " Patchwork
2024-05-21 7:39 ` ✗ CI.Hooks: failure " Patchwork
2024-05-21 7:41 ` ✗ CI.checksparse: warning " Patchwork
2024-05-21 8:03 ` ✓ CI.BAT: success " Patchwork
2024-05-21 9:09 ` ✗ CI.FULL: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=ZkydweaFUX/2kEw3@DUT025-TGLU.fm.intel.com \
--to=matthew.brost@intel.com \
--cc=Amaranath.Somalapuram@amd.com \
--cc=christian.koenig@amd.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=thomas.hellstrom@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox