From: "Welty, Brian" <brian.welty@intel.com>
To: Pallavi Mishra <pallavi.mishra@intel.com>,
<intel-xe@lists.freedesktop.org>
Subject: Re: [PATCH v2 1/3] drm/xe/uapi: CLOS uapi support
Date: Thu, 11 Jan 2024 17:04:40 -0800 [thread overview]
Message-ID: <fcc548a2-e79e-4981-9e85-45425c6ea4b1@intel.com> (raw)
In-Reply-To: <20240109235758.1432987-2-pallavi.mishra@intel.com>
On 1/9/2024 3:57 PM, Pallavi Mishra wrote:
> Introduce Class of Service (CLOS) uapi. This allows
> Apps to reserve portions of the GPU Caches for
> exclusive use.
I think this should be the last patch in the series...
so the uapi is advertised only after the supporting code is in place in the KMD.
>
> Signed-off-by: Pallavi Mishra <pallavi.mishra@intel.com>
> ---
> include/uapi/drm/xe_drm.h | 71 +++++++++++++++++++++++++++++++++++++++
> 1 file changed, 71 insertions(+)
>
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index 50bbea0992d9..7d5656a55ddf 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -100,6 +100,10 @@ extern "C" {
> #define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
> #define DRM_XE_EXEC 0x09
> #define DRM_XE_WAIT_USER_FENCE 0x0a
> +#define DRM_XE_CLOS_RESERVE 0x0b
> +#define DRM_XE_CLOS_FREE 0x0c
> +#define DRM_XE_CLOS_SET_WAYS 0x0d
> +
> /* Must be kept compact -- no holes */
>
> #define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
> @@ -113,6 +117,9 @@ extern "C" {
> #define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
> #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
> #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
> +#define DRM_IOCTL_XE_CLOS_RESERVE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_CLOS_RESERVE, struct drm_xe_clos_reserve)
> +#define DRM_IOCTL_XE_CLOS_FREE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_CLOS_FREE, struct drm_xe_clos_free)
> +#define DRM_IOCTL_XE_CLOS_SET_WAYS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_CLOS_SET_WAYS, struct drm_xe_clos_set_ways)
>
> /**
> * DOC: Xe IOCTL Extensions
> @@ -1339,6 +1346,70 @@ struct drm_xe_wait_user_fence {
> __u64 reserved[2];
> };
>
> +/**
> + * struct drm_xe_clos_reserve
> + *
> + * Allows clients to request reservation of one free CLOS, to use in subsequent
> + * Cache Reservations.
> + *
> + */
> +struct drm_xe_clos_reserve {
> + /** @clos_index: clos index for reservation */
> + __u16 clos_index;
> +
> + /** @pad16: MBZ */
> + __u16 pad16;
> +};
> +
> +/**
> + * struct drm_xe_clos_free
> + *
> + * Free off a previously reserved CLOS set. Any corresponding Cache Reservations
> + * that are active for the CLOS are automatically dropped and returned to the
> + * Shared set.
> + *
> + * The clos_index indicates the CLOS set which is being released and must
> + * correspond to a CLOS index previously reserved.
> + *
> + */
> +struct drm_xe_clos_free {
> + /** @clos_index: free clos index */
> + __u16 clos_index;
> +
> + /** @pad16: MBZ */
> + __u16 pad16;
> +};
> +
> +/**
> + * struct drm_xe_clos_set_ways
> + *
> + * Allows clients to request, or release, reservation of one or more cache ways,
> + * within a previously reserved CLOS set.
> + *
> + * If num_ways = 0, KMD will drop any existing Reservation for the specified
> + * clos_index and cache_level. The requested clos_index and cache_level Waymasks
> + * will then track the Shared set once again.
> + *
> + * Otherwise, the requested number of Ways will be removed from the Shared set
> + * for the requested cache level, and assigned to the Cache and CLOS specified
> + * by cache_level/clos_index.
> + *
> + */
> +struct drm_xe_clos_set_ways {
> + /** @clos_index: reserved clos index */
> + __u16 clos_index;
> +
> + /** @cache_level: level of cache */
> + __u16 cache_level; /* e.g. 3 for L3 */
> +
> + /** @num_ways: cache ways */
> + __u16 num_ways;
> +
> + /** @pad16: MBZ */
> + __u16 pad16;
> +};
> +
> +
> #if defined(__cplusplus)
> }
> #endif
next prev parent reply other threads:[~2024-01-12 1:05 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-01-09 23:57 [PATCH v2 0/3] drm/xe: CLOS Based Cache Reservation support Pallavi Mishra
2024-01-09 23:57 ` [PATCH v2 1/3] drm/xe/uapi: CLOS uapi support Pallavi Mishra
2024-01-12 1:04 ` Welty, Brian [this message]
2024-01-09 23:57 ` [PATCH v2 2/3] drm/xe: Introduce xe_clos.c Pallavi Mishra
2024-01-12 0:41 ` Welty, Brian
2024-01-12 18:44 ` Mishra, Pallavi
2024-01-09 23:57 ` [PATCH v2 3/3] drm/xe: Add CLOS specific initializations Pallavi Mishra
2024-01-12 1:02 ` Welty, Brian
2024-01-12 18:59 ` Mishra, Pallavi
2024-01-12 19:46 ` Welty, Brian
2024-01-16 18:49 ` Mishra, Pallavi
2024-01-10 1:49 ` ✓ CI.Patch_applied: success for drm/xe: CLOS Based Cache Reservation support Patchwork
2024-01-10 1:49 ` ✗ CI.checkpatch: warning " Patchwork
2024-01-10 1:50 ` ✓ CI.KUnit: success " Patchwork
2024-01-10 1:58 ` ✓ CI.Build: " Patchwork
2024-01-10 1:58 ` ✗ CI.Hooks: failure " Patchwork
2024-01-10 1:59 ` ✓ CI.checksparse: success " Patchwork
2024-01-10 2:35 ` ✗ CI.BAT: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=fcc548a2-e79e-4981-9e85-45425c6ea4b1@intel.com \
--to=brian.welty@intel.com \
--cc=intel-xe@lists.freedesktop.org \
--cc=pallavi.mishra@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox