* [igt-dev] [PATCH i-g-t 01/10] include: temporary import of headers
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
@ 2019-07-25 10:30 ` Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 02/10] tests/i915-query: add new tests for perf configurations queries Lionel Landwerlin
` (11 subsequent siblings)
12 siblings, 0 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:30 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
include/drm-uapi/amdgpu_drm.h | 56 ++++++++-
include/drm-uapi/drm.h | 38 +++++++
include/drm-uapi/drm_mode.h | 121 +++++++++++++++++++-
include/drm-uapi/i915_drm.h | 194 +++++++++++++++++++++++++++++++-
include/drm-uapi/msm_drm.h | 14 +++
include/drm-uapi/nouveau_drm.h | 51 +++++++++
include/drm-uapi/panfrost_drm.h | 24 ++++
include/drm-uapi/v3d_drm.h | 28 +++++
include/drm-uapi/vmwgfx_drm.h | 4 +-
9 files changed, 522 insertions(+), 8 deletions(-)
diff --git a/include/drm-uapi/amdgpu_drm.h b/include/drm-uapi/amdgpu_drm.h
index be84e43c..d799858b 100644
--- a/include/drm-uapi/amdgpu_drm.h
+++ b/include/drm-uapi/amdgpu_drm.h
@@ -210,6 +210,9 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once cause gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
+/* indicate some errors are detected by RAS */
+#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
+#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
@@ -272,13 +275,14 @@ union drm_amdgpu_vm {
/* sched ioctl */
#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1
+#define AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE 2
struct drm_amdgpu_sched_in {
/* AMDGPU_SCHED_OP_* */
__u32 op;
__u32 fd;
__s32 priority;
- __u32 flags;
+ __u32 ctx_id;
};
union drm_amdgpu_sched {
@@ -523,6 +527,9 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
+#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@@ -565,6 +572,11 @@ union drm_amdgpu_cs {
* caches (L2/vL1/sL1/I$). */
#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
+/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
+ * This will reset wave ID counters for the IB.
+ */
+#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
+
struct drm_amdgpu_cs_chunk_ib {
__u32 _pad;
/** AMDGPU_IB_FLAG_* */
@@ -598,6 +610,12 @@ struct drm_amdgpu_cs_chunk_sem {
__u32 handle;
};
+struct drm_amdgpu_cs_chunk_syncobj {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+};
+
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
@@ -673,6 +691,7 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
/* Subquery id: Query DMCU firmware version */
#define AMDGPU_INFO_FW_DMCU 0x12
+ #define AMDGPU_INFO_FW_TA 0x13
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
@@ -726,6 +745,37 @@ struct drm_amdgpu_cs_chunk_data {
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
+/* query ras mask of enabled features*/
+#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
+
+/* RAS MASK: UMC (VRAM) */
+#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
+/* RAS MASK: SDMA */
+#define AMDGPU_INFO_RAS_ENABLED_SDMA (1 << 1)
+/* RAS MASK: GFX */
+#define AMDGPU_INFO_RAS_ENABLED_GFX (1 << 2)
+/* RAS MASK: MMHUB */
+#define AMDGPU_INFO_RAS_ENABLED_MMHUB (1 << 3)
+/* RAS MASK: ATHUB */
+#define AMDGPU_INFO_RAS_ENABLED_ATHUB (1 << 4)
+/* RAS MASK: PCIE */
+#define AMDGPU_INFO_RAS_ENABLED_PCIE (1 << 5)
+/* RAS MASK: HDP */
+#define AMDGPU_INFO_RAS_ENABLED_HDP (1 << 6)
+/* RAS MASK: XGMI */
+#define AMDGPU_INFO_RAS_ENABLED_XGMI (1 << 7)
+/* RAS MASK: DF */
+#define AMDGPU_INFO_RAS_ENABLED_DF (1 << 8)
+/* RAS MASK: SMN */
+#define AMDGPU_INFO_RAS_ENABLED_SMN (1 << 9)
+/* RAS MASK: SEM */
+#define AMDGPU_INFO_RAS_ENABLED_SEM (1 << 10)
+/* RAS MASK: MP0 */
+#define AMDGPU_INFO_RAS_ENABLED_MP0 (1 << 11)
+/* RAS MASK: MP1 */
+#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
+/* RAS MASK: FUSE */
+#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -862,6 +912,7 @@ struct drm_amdgpu_info_firmware {
#define AMDGPU_VRAM_TYPE_HBM 6
#define AMDGPU_VRAM_TYPE_DDR3 7
#define AMDGPU_VRAM_TYPE_DDR4 8
+#define AMDGPU_VRAM_TYPE_GDDR6 9
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -941,6 +992,8 @@ struct drm_amdgpu_info_device {
__u64 high_va_offset;
/** The maximum high virtual address */
__u64 high_va_max;
+ /* gfx10 pa_sc_tile_steering_override */
+ __u32 pa_sc_tile_steering_override;
};
struct drm_amdgpu_info_hw_ip {
@@ -994,6 +1047,7 @@ struct drm_amdgpu_info_vce_clock_table {
#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
#define AMDGPU_FAMILY_AI 141 /* Vega10 */
#define AMDGPU_FAMILY_RV 142 /* Raven */
+#define AMDGPU_FAMILY_NV 143 /* Navi10 */
#if defined(__cplusplus)
}
diff --git a/include/drm-uapi/drm.h b/include/drm-uapi/drm.h
index 85c685a2..438abde3 100644
--- a/include/drm-uapi/drm.h
+++ b/include/drm-uapi/drm.h
@@ -44,6 +44,7 @@ typedef unsigned int drm_handle_t;
#else /* One of the BSDs */
+#include <stdint.h>
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t __s8;
@@ -643,6 +644,7 @@ struct drm_gem_open {
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
#define DRM_CAP_SYNCOBJ 0x13
+#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -729,8 +731,18 @@ struct drm_syncobj_handle {
__u32 pad;
};
+struct drm_syncobj_transfer {
+ __u32 src_handle;
+ __u32 dst_handle;
+ __u64 src_point;
+ __u64 dst_point;
+ __u32 flags;
+ __u32 pad;
+};
+
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@@ -741,12 +753,33 @@ struct drm_syncobj_wait {
__u32 pad;
};
+struct drm_syncobj_timeline_wait {
+ __u64 handles;
+ /* wait on specific timeline point for every handle */
+ __u64 points;
+ /* absolute timeout */
+ __s64 timeout_nsec;
+ __u32 count_handles;
+ __u32 flags;
+ __u32 first_signaled; /* only valid when not waiting all */
+ __u32 pad;
+};
+
+
struct drm_syncobj_array {
__u64 handles;
__u32 count_handles;
__u32 pad;
};
+struct drm_syncobj_timeline_array {
+ __u64 handles;
+ __u64 points;
+ __u32 count_handles;
+ __u32 pad;
+};
+
+
/* Query current scanout sequence number */
struct drm_crtc_get_sequence {
__u32 crtc_id; /* requested crtc_id */
@@ -903,6 +936,11 @@ extern "C" {
#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
+#define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
diff --git a/include/drm-uapi/drm_mode.h b/include/drm-uapi/drm_mode.h
index a439c2e6..5ab331e5 100644
--- a/include/drm-uapi/drm_mode.h
+++ b/include/drm-uapi/drm_mode.h
@@ -33,7 +33,15 @@
extern "C" {
#endif
-#define DRM_DISPLAY_INFO_LEN 32
+/**
+ * DOC: overview
+ *
+ * DRM exposes many UAPI and structure definitions to have a consistent
+ * and standardized interface with users.
+ * Userspace can refer to these structure definitions and UAPI formats
+ * to communicate with the driver.
+ */
+
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
@@ -622,7 +630,8 @@ struct drm_color_ctm {
struct drm_color_lut {
/*
- * Data is U0.16 fixed point format.
+ * Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
+ * 0xffff == 1.0.
*/
__u16 red;
__u16 green;
@@ -630,6 +639,92 @@ struct drm_color_lut {
__u16 reserved;
};
+/**
+ * struct hdr_metadata_infoframe - HDR Metadata Infoframe Data.
+ *
+ * HDR Metadata Infoframe as per CTA 861.G spec. This is expected
+ * to match exactly with the spec.
+ *
+ * Userspace is expected to pass the metadata information as per
+ * the format described in this structure.
+ */
+struct hdr_metadata_infoframe {
+ /**
+ * @eotf: Electro-Optical Transfer Function (EOTF)
+ * used in the stream.
+ */
+ __u8 eotf;
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u8 metadata_type;
+ /**
+ * @display_primaries: Color Primaries of the Data.
+ * These are coded as unsigned 16-bit values in units of
+ * 0.00002, where 0x0000 represents zero and 0xC350
+ * represents 1.0000.
+ * @display_primaries.x: X coordinate of color primary.
+ * @display_primaries.y: Y coordinate of color primary.
+ */
+ struct {
+ __u16 x, y;
+ } display_primaries[3];
+ /**
+ * @white_point: White Point of Colorspace Data.
+ * These are coded as unsigned 16-bit values in units of
+ * 0.00002, where 0x0000 represents zero and 0xC350
+ * represents 1.0000.
+ * @white_point.x: X coordinate of whitepoint of color primary.
+ * @white_point.y: Y coordinate of whitepoint of color primary.
+ */
+ struct {
+ __u16 x, y;
+ } white_point;
+ /**
+ * @max_display_mastering_luminance: Max Mastering Display Luminance.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_display_mastering_luminance;
+ /**
+ * @min_display_mastering_luminance: Min Mastering Display Luminance.
+ * This value is coded as an unsigned 16-bit value in units of
+ * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF
+ * represents 6.5535 cd/m2.
+ */
+ __u16 min_display_mastering_luminance;
+ /**
+ * @max_cll: Max Content Light Level.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_cll;
+ /**
+ * @max_fall: Max Frame Average Light Level.
+ * This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
+ * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
+ */
+ __u16 max_fall;
+};
+
+/**
+ * struct hdr_output_metadata - HDR output metadata
+ *
+ * Metadata Information to be passed from userspace
+ */
+struct hdr_output_metadata {
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u32 metadata_type;
+ /**
+ * @hdmi_metadata_type1: HDR Metadata Infoframe.
+ */
+ union {
+ struct hdr_metadata_infoframe hdmi_metadata_type1;
+ };
+};
+
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
@@ -803,6 +898,10 @@ struct drm_format_modifier {
};
/**
+ * struct drm_mode_create_blob - Create new blob property
+ * @data: Pointer to data to copy.
+ * @length: Length of data to copy.
+ * @blob_id: new property ID.
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
*/
@@ -816,6 +915,8 @@ struct drm_mode_create_blob {
};
/**
+ * struct drm_mode_destroy_blob - Destroy user blob
+ * @blob_id: blob_id to destroy
* Destroy a user-created blob property.
*/
struct drm_mode_destroy_blob {
@@ -823,6 +924,12 @@ struct drm_mode_destroy_blob {
};
/**
+ * struct drm_mode_create_lease - Create lease
+ * @object_ids: Pointer to array of object ids.
+ * @object_count: Number of object ids.
+ * @flags: flags for new FD.
+ * @lessee_id: unique identifier for lessee.
+ * @fd: file descriptor to new drm_master file.
* Lease mode resources, creating another drm_master.
*/
struct drm_mode_create_lease {
@@ -840,6 +947,10 @@ struct drm_mode_create_lease {
};
/**
+ * struct drm_mode_list_lessees - List lessees
+ * @count_lessees: Number of lessees.
+ * @pad: pad.
+ * @lessees_ptr: Pointer to lessees.
* List lesses from a drm_master
*/
struct drm_mode_list_lessees {
@@ -860,6 +971,10 @@ struct drm_mode_list_lessees {
};
/**
+ * struct drm_mode_get_lease - Get Lease
+ * @count_objects: Number of leased objects.
+ * @pad: pad.
+ * @objects_ptr: Pointer to objects.
* Get leased objects
*/
struct drm_mode_get_lease {
@@ -880,6 +995,8 @@ struct drm_mode_get_lease {
};
/**
+ * struct drm_mode_revoke_lease - Revoke lease
+ * @lessee_id: Unique ID of lessee.
* Revoke lease
*/
struct drm_mode_revoke_lease {
diff --git a/include/drm-uapi/i915_drm.h b/include/drm-uapi/i915_drm.h
index 761517f1..4f060ed3 100644
--- a/include/drm-uapi/i915_drm.h
+++ b/include/drm-uapi/i915_drm.h
@@ -521,6 +521,7 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
+#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
#define I915_PARAM_HUC_STATUS 42
@@ -610,6 +611,29 @@ typedef struct drm_i915_irq_wait {
* See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
*/
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
+
+/*
+ * Revision of the i915-perf uAPI. The value returned helps determine what
+ * i915-perf features are available. See drm_i915_perf_property_id.
+ */
+#define I915_PARAM_PERF_REVISION 54
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * timeline syncobj through drm_i915_gem_execbuf_ext_timeline_fences. See
+ * I915_EXEC_EXT.
+ */
+#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
+
+/*
+ * Request an i915/perf performance configuration change before running the
+ * commands given in an execbuf.
+ *
+ * Performance configuration ID and the file descriptor of the i915 perf
+ * stream are given through drm_i915_gem_execbuffer_ext_perf. See
+ * I915_EXEC_EXT.
+ */
+#define I915_PARAM_HAS_EXEC_PERF_CONFIG 56
+
/* Must be kept compact -- no holes and well documented */
typedef struct drm_i915_getparam {
@@ -1006,6 +1030,68 @@ struct drm_i915_gem_exec_fence {
__u32 flags;
};
+enum drm_i915_gem_execbuffer_ext {
+ /**
+ * See drm_i915_gem_execbuf_ext_timeline_fences.
+ */
+ DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES = 0,
+
+ /**
+ * See drm_i915_gem_execbuffer_perf_ext.
+ */
+ DRM_I915_GEM_EXECBUFFER_EXT_PERF,
+
+ DRM_I915_GEM_EXECBUFFER_EXT_MAX /* non-ABI */
+};
+
+/**
+ * This structure describes an array of drm_syncobj and associated points for
+ * timeline variants of drm_syncobj. It is invalid to append this structure to
+ * the execbuf if I915_EXEC_FENCE_ARRAY is set.
+ */
+struct drm_i915_gem_execbuffer_ext_timeline_fences {
+ struct i915_user_extension base;
+
+ /**
+ * Number of elements in the handles_ptr & values_ptr arrays.
+ */
+ __u64 fence_count;
+
+ /**
+ * Pointer to an array of struct drm_i915_gem_exec_fence of length
+ * fence_count.
+ */
+ __u64 handles_ptr;
+
+ /**
+ * Pointer to an array of u64 values of length fence_count. Values
+ * must be 0 for a binary drm_syncobj. A Value of 0 for a timeline
+ * drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
+ */
+ __u64 values_ptr;
+};
+
+struct drm_i915_gem_execbuffer_ext_perf {
+ struct i915_user_extension base;
+
+ /**
+ * Performance file descriptor returned by DRM_IOCTL_I915_PERF_OPEN.
+ * This is used to identify that the application
+ */
+ __s32 perf_fd;
+
+ /**
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 pad;
+
+ /**
+ * OA configuration ID to switch to before executing the commands
+ * associated to the execbuf.
+ */
+ __u64 oa_config;
+};
+
struct drm_i915_gem_execbuffer2 {
/**
* List of gem_exec_object2 structs
@@ -1022,8 +1108,14 @@ struct drm_i915_gem_execbuffer2 {
__u32 num_cliprects;
/**
* This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
- * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
- * struct drm_i915_gem_exec_fence *fences.
+ * & I915_EXEC_EXT are not set.
+ *
+ * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
+ * of struct drm_i915_gem_exec_fence and num_cliprects is the length
+ * of the array.
+ *
+ * If I915_EXEC_EXT is set, then this is a pointer to a single struct
+ * drm_i915_gem_base_execbuffer_ext and num_cliprects is 0.
*/
__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (0x3f)
@@ -1141,7 +1233,16 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_FENCE_SUBMIT (1 << 20)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
+/*
+ * Setting I915_EXEC_EXT implies that drm_i915_gem_execbuffer2.cliprects_ptr
+ * is treated as a pointer to a linked list of i915_user_extension. Each
+ * i915_user_extension node is the base of a larger structure. The list of
+ * supported structures are listed in the drm_i915_gem_execbuffer_ext
+ * enum.
+ */
+#define I915_EXEC_EXT (1 << 21)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_EXT<<1))
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1843,23 +1944,31 @@ enum drm_i915_perf_property_id {
* Open the stream for a specific context handle (as used with
* execbuffer2). A stream opened for a specific context this way
* won't typically require root privileges.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_CTX_HANDLE = 1,
/**
* A value of 1 requests the inclusion of raw OA unit reports as
* part of stream samples.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_SAMPLE_OA,
/**
* The value specifies which set of OA unit metrics should be
* be configured, defining the contents of any OA unit reports.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_OA_METRICS_SET,
/**
* The value specifies the size and layout of OA unit reports.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_OA_FORMAT,
@@ -1869,9 +1978,22 @@ enum drm_i915_perf_property_id {
* from this exponent as follows:
*
* 80ns * 2^(period_exponent + 1)
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_OA_EXPONENT,
+ /**
+ * Specifying this property is only valid when specify a context to
+ * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
+ * will hold preemption of the particular context we want to gather
+ * performance data about. The execbuf2 submissions must include a
+ * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
+ *
+ * This property is available in perf revision 2.
+ */
+ DRM_I915_PERF_PROP_HOLD_PREEMPTION,
+
DRM_I915_PERF_PROP_MAX /* non-ABI */
};
@@ -1900,6 +2022,8 @@ struct drm_i915_perf_open_param {
* to close and re-open a stream with the same configuration.
*
* It's undefined whether any pending data for the stream will be lost.
+ *
+ * This ioctl is available in perf revision 1.
*/
#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
@@ -1907,6 +2031,8 @@ struct drm_i915_perf_open_param {
* Disable data capture for a stream.
*
* It is an error to try and read a stream that is disabled.
+ *
+ * This ioctl is available in perf revision 1.
*/
#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
@@ -1983,6 +2109,7 @@ struct drm_i915_query_item {
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
#define DRM_I915_QUERY_ENGINE_INFO 2
+#define DRM_I915_QUERY_PERF_CONFIG 3
/* Must be kept compact -- no holes and well documented */
/*
@@ -1994,9 +2121,18 @@ struct drm_i915_query_item {
__s32 length;
/*
- * Unused for now. Must be cleared to zero.
+ * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ *
+ * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
+ * following :
+ * - DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
*/
__u32 flags;
+#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
/*
* Data will be written at the location pointed by data_ptr when the
@@ -2122,6 +2258,56 @@ struct drm_i915_query_engine_info {
struct drm_i915_engine_info engines[];
};
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+ */
+struct drm_i915_query_perf_config {
+ union {
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
+ * this field to the number of configurations available.
+ */
+ __u64 n_configs;
+
+ /*
+ * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
+ * i915 will use the value in this field as configuration
+ * identifier to decide what data to write into config_ptr.
+ */
+ __u64 config;
+
+ /*
+ * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
+ * i915 will use the value in this field as configuration
+ * identifier to decide what data to write into config_ptr.
+ *
+ * String formatted like "%08x-%04x-%04x-%04x-%012x"
+ */
+ char uuid[36];
+ };
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
+ * write an array of __u64 of configuration identifiers.
+ *
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
+ * write a struct drm_i915_perf_oa_config. If the following fields of
+ * drm_i915_perf_oa_config are not set to 0, i915 will write into
+ * the associated pointers the values submitted when the
+ * configuration was created :
+ *
+ * - n_mux_regs
+ * - n_boolean_regs
+ * - n_flex_regs
+ */
+ __u8 data[];
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/drm-uapi/msm_drm.h b/include/drm-uapi/msm_drm.h
index 91a16b33..0b85ed6a 100644
--- a/include/drm-uapi/msm_drm.h
+++ b/include/drm-uapi/msm_drm.h
@@ -74,6 +74,8 @@ struct drm_msm_timespec {
#define MSM_PARAM_TIMESTAMP 0x05
#define MSM_PARAM_GMEM_BASE 0x06
#define MSM_PARAM_NR_RINGS 0x07
+#define MSM_PARAM_PP_PGTABLE 0x08 /* => 1 for per-process pagetables, else 0 */
+#define MSM_PARAM_FAULTS 0x09
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
@@ -286,6 +288,16 @@ struct drm_msm_submitqueue {
__u32 id; /* out, identifier */
};
+#define MSM_SUBMITQUEUE_PARAM_FAULTS 0
+
+struct drm_msm_submitqueue_query {
+ __u64 data;
+ __u32 id;
+ __u32 param;
+ __u32 len;
+ __u32 pad;
+};
+
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
@@ -302,6 +314,7 @@ struct drm_msm_submitqueue {
*/
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
+#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -313,6 +326,7 @@ struct drm_msm_submitqueue {
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query)
#if defined(__cplusplus)
}
diff --git a/include/drm-uapi/nouveau_drm.h b/include/drm-uapi/nouveau_drm.h
index 259588a4..9459a6e3 100644
--- a/include/drm-uapi/nouveau_drm.h
+++ b/include/drm-uapi/nouveau_drm.h
@@ -133,12 +133,63 @@ struct drm_nouveau_gem_cpu_fini {
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
+#define DRM_NOUVEAU_SVM_INIT 0x08
+#define DRM_NOUVEAU_SVM_BIND 0x09
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
#define DRM_NOUVEAU_GEM_INFO 0x44
+struct drm_nouveau_svm_init {
+ __u64 unmanaged_addr;
+ __u64 unmanaged_size;
+};
+
+struct drm_nouveau_svm_bind {
+ __u64 header;
+ __u64 va_start;
+ __u64 va_end;
+ __u64 npages;
+ __u64 stride;
+ __u64 result;
+ __u64 reserved0;
+ __u64 reserved1;
+};
+
+#define NOUVEAU_SVM_BIND_COMMAND_SHIFT 0
+#define NOUVEAU_SVM_BIND_COMMAND_BITS 8
+#define NOUVEAU_SVM_BIND_COMMAND_MASK ((1 << 8) - 1)
+#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT 8
+#define NOUVEAU_SVM_BIND_PRIORITY_BITS 8
+#define NOUVEAU_SVM_BIND_PRIORITY_MASK ((1 << 8) - 1)
+#define NOUVEAU_SVM_BIND_TARGET_SHIFT 16
+#define NOUVEAU_SVM_BIND_TARGET_BITS 32
+#define NOUVEAU_SVM_BIND_TARGET_MASK 0xffffffff
+
+/*
+ * Below is used to validate ioctl argument, userspace can also use it to make
+ * sure that no bits are set beyond known fields for a given kernel version.
+ */
+#define NOUVEAU_SVM_BIND_VALID_BITS 48
+#define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
+
+
+/*
+ * NOUVEAU_BIND_COMMAND__MIGRATE: synchronous migrate to target memory.
+ * result: number of pages successfully migrated to the target memory.
+ */
+#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE 0
+
+/*
+ * NOUVEAU_SVM_BIND_HEADER_TARGET__GPU_VRAM: target the GPU VRAM memory.
+ */
+#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
+
+
+#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
+#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
+
#define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
diff --git a/include/drm-uapi/panfrost_drm.h b/include/drm-uapi/panfrost_drm.h
index a52e0283..b5d37063 100644
--- a/include/drm-uapi/panfrost_drm.h
+++ b/include/drm-uapi/panfrost_drm.h
@@ -18,6 +18,8 @@ extern "C" {
#define DRM_PANFROST_MMAP_BO 0x03
#define DRM_PANFROST_GET_PARAM 0x04
#define DRM_PANFROST_GET_BO_OFFSET 0x05
+#define DRM_PANFROST_PERFCNT_ENABLE 0x06
+#define DRM_PANFROST_PERFCNT_DUMP 0x07
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -26,6 +28,15 @@ extern "C" {
#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
+/*
+ * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
+ * param is set to true.
+ * All these ioctl(s) are subject to deprecation, so please don't rely on
+ * them for anything but debugging purpose.
+ */
+#define DRM_IOCTL_PANFROST_PERFCNT_ENABLE DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_PERFCNT_ENABLE, struct drm_panfrost_perfcnt_enable)
+#define DRM_IOCTL_PANFROST_PERFCNT_DUMP DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_PERFCNT_DUMP, struct drm_panfrost_perfcnt_dump)
+
#define PANFROST_JD_REQ_FS (1 << 0)
/**
* struct drm_panfrost_submit - ioctl argument for submitting commands to the 3D
@@ -135,6 +146,19 @@ struct drm_panfrost_get_bo_offset {
__u64 offset;
};
+struct drm_panfrost_perfcnt_enable {
+ __u32 enable;
+ /*
+ * On bifrost we have 2 sets of counters, this parameter defines the
+ * one to track.
+ */
+ __u32 counterset;
+};
+
+struct drm_panfrost_perfcnt_dump {
+ __u64 buf_ptr;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/drm-uapi/v3d_drm.h b/include/drm-uapi/v3d_drm.h
index ea70669d..58fbe48c 100644
--- a/include/drm-uapi/v3d_drm.h
+++ b/include/drm-uapi/v3d_drm.h
@@ -37,6 +37,7 @@ extern "C" {
#define DRM_V3D_GET_PARAM 0x04
#define DRM_V3D_GET_BO_OFFSET 0x05
#define DRM_V3D_SUBMIT_TFU 0x06
+#define DRM_V3D_SUBMIT_CSD 0x07
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -45,6 +46,7 @@ extern "C" {
#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
+#define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd)
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -190,6 +192,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_V3D_CORE0_IDENT1,
DRM_V3D_PARAM_V3D_CORE0_IDENT2,
DRM_V3D_PARAM_SUPPORTS_TFU,
+ DRM_V3D_PARAM_SUPPORTS_CSD,
};
struct drm_v3d_get_param {
@@ -230,6 +233,31 @@ struct drm_v3d_submit_tfu {
__u32 out_sync;
};
+/* Submits a compute shader for dispatch. This job will block on any
+ * previous compute shaders submitted on this fd, and any other
+ * synchronization must be performed with in_sync/out_sync.
+ */
+struct drm_v3d_submit_csd {
+ __u32 cfg[7];
+ __u32 coef[4];
+
+ /* Pointer to a u32 array of the BOs that are referenced by the job.
+ */
+ __u64 bo_handles;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ /* sync object to block on before running the CSD job. Each
+ * CSD job will execute in the order submitted to its FD.
+ * Synchronization against rendering/TFU jobs or CSD from
+ * other fds requires using sync objects.
+ */
+ __u32 in_sync;
+ /* Sync object to signal when the CSD job is done. */
+ __u32 out_sync;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/drm-uapi/vmwgfx_drm.h b/include/drm-uapi/vmwgfx_drm.h
index 399f5831..02cab33f 100644
--- a/include/drm-uapi/vmwgfx_drm.h
+++ b/include/drm-uapi/vmwgfx_drm.h
@@ -891,11 +891,13 @@ struct drm_vmw_shader_arg {
* surface.
* @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
* given.
+ * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
*/
enum drm_vmw_surface_flags {
drm_vmw_surface_flag_shareable = (1 << 0),
drm_vmw_surface_flag_scanout = (1 << 1),
- drm_vmw_surface_flag_create_buffer = (1 << 2)
+ drm_vmw_surface_flag_create_buffer = (1 << 2),
+ drm_vmw_surface_flag_coherent = (1 << 3),
};
/**
--
2.22.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 21+ messages in thread* [igt-dev] [PATCH i-g-t 02/10] tests/i915-query: add new tests for perf configurations queries
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 01/10] include: temporary import of headers Lionel Landwerlin
@ 2019-07-25 10:30 ` Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 03/10] tests/perf: add tests for holding preemption Lionel Landwerlin
` (10 subsequent siblings)
12 siblings, 0 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:30 UTC (permalink / raw)
To: igt-dev
These new tests allow to list the available configurations and also to
query the data that makes up a configuration.
v2: Verify uuid queries (Lionel)
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
tests/i915/i915_query.c | 608 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 608 insertions(+)
diff --git a/tests/i915/i915_query.c b/tests/i915/i915_query.c
index ecbec3ae..1af731d3 100644
--- a/tests/i915/i915_query.c
+++ b/tests/i915/i915_query.c
@@ -22,6 +22,7 @@
*/
#include "igt.h"
+#include "igt_sysfs.h"
#include <limits.h>
@@ -718,6 +719,598 @@ static void engines(int fd)
free(engines);
}
+static bool query_perf_config_supported(int fd)
+{
+ struct drm_i915_query_item item = {
+ .query_id = DRM_I915_QUERY_PERF_CONFIG,
+ .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
+ };
+
+ return __i915_query_items(fd, &item, 1) == 0 && item.length > 0;
+}
+
+/*
+ * Verify that perf configuration queries for list of configurations
+ * rejects invalid parameters.
+ */
+static void test_query_perf_config_list_invalid(int fd)
+{
+ struct drm_i915_query_perf_config *query_config_ptr;
+ struct drm_i915_query_item item;
+ size_t len;
+ void *data;
+
+ /* Verify invalid flags for perf config queries */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = 42; /* invalid */
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ /*
+ * A too small data length is invalid. We should have at least
+ * the test config list.
+ */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_LIST;
+ item.length = sizeof(struct drm_i915_query_perf_config); /* invalid */
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ /* Flags on the query config data are invalid. */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_LIST;
+ item.length = 0;
+ i915_query_items(fd, &item, 1);
+ igt_assert(item.length > sizeof(struct drm_i915_query_perf_config));
+
+ query_config_ptr = calloc(1, item.length);
+ query_config_ptr->flags = 1; /* invalid */
+ item.data_ptr = to_user_pointer(query_config_ptr);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+ free(query_config_ptr);
+
+ /*
+ * A NULL data pointer is invalid when the length is long
+ * enough for i915 to copy data into the pointed memory.
+ */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_LIST;
+ item.length = 0;
+ i915_query_items(fd, &item, 1);
+ igt_assert(item.length > sizeof(struct drm_i915_query_perf_config));
+
+ i915_query_items(fd, &item, 1); /* leaves data ptr to null */
+ igt_assert_eq(item.length, -EFAULT);
+
+ /* Trying to write into read only memory will fail. */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_LIST;
+ item.length = 0;
+ i915_query_items(fd, &item, 1);
+ igt_assert(item.length > sizeof(struct drm_i915_query_perf_config));
+
+ len = ALIGN(item.length, 4096);
+ data = mmap(0, len, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ memset(data, 0, len);
+ mprotect(data, len, PROT_READ);
+ item.data_ptr = to_user_pointer(data); /* invalid with read only data */
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EFAULT);
+
+ munmap(data, len);
+}
+
+static int query_perf_config_id_data(int fd, int length,
+ struct drm_i915_query_perf_config *query)
+{
+ struct drm_i915_query_item item;
+
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID;
+ item.length = length;
+ item.data_ptr = to_user_pointer(query);
+ i915_query_items(fd, &item, 1);
+
+ return item.length;
+}
+
+static int query_perf_config_uuid_data(int fd, int length,
+ struct drm_i915_query_perf_config *query)
+{
+ struct drm_i915_query_item item;
+
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID;
+ item.length = length;
+ item.data_ptr = to_user_pointer(query);
+ i915_query_items(fd, &item, 1);
+
+ return item.length;
+}
+
+/*
+ * Verify that perf configuration queries for configuration data
+ * rejects invalid parameters.
+ *
+ * Each stanza below probes one invalid input (bad flags, short
+ * lengths, bad pointers, read-only memory, short register counts) and
+ * checks the errno reported back through item.length.
+ */
+static void test_query_perf_config_data_invalid(int fd)
+{
+ struct {
+ struct drm_i915_query_perf_config query;
+ struct drm_i915_perf_oa_config oa;
+ } query;
+ struct drm_i915_query_item item;
+ size_t len;
+ void *data;
+
+ /* Flags are invalid for perf config queries */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID + 1; /* invalid */
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ /*
+ * A too small data length is invalid. We should have at least
+ * the test config list.
+ */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID;
+ item.length = sizeof(struct drm_i915_query_perf_config); /* invalid */
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ /* One byte short of query + oa config is also too small. */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID;
+ item.length = sizeof(struct drm_i915_query_perf_config) +
+ sizeof(struct drm_i915_perf_oa_config) - 1; /* invalid */
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ /* Flags on the query config data are invalid. */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID;
+ item.length = 0;
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, sizeof(query));
+
+ memset(&query, 0, sizeof(query));
+ query.query.flags = 1; /* invalid */
+ item.data_ptr = to_user_pointer(&query.query);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EINVAL);
+
+ /* Invalid UUID. */
+ memset(&item, 0, sizeof(item));
+ memset(&query, 0, sizeof(query));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID;
+ item.data_ptr = to_user_pointer(&query);
+ item.length = sizeof(query);
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -ENOENT);
+
+ /*
+ * A NULL data pointer is invalid when the length is long
+ * enough for i915 to copy data into the pointed memory.
+ */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID;
+ item.length = 0;
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, sizeof(query));
+
+ i915_query_items(fd, &item, 1); /* leaves data ptr to null */
+ igt_assert_eq(item.length, -EFAULT);
+
+ item.data_ptr = ULONG_MAX; /* invalid pointer */
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EFAULT);
+
+ /* Trying to write into read only memory will fail. */
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID;
+ item.length = 0;
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, sizeof(query));
+
+ len = ALIGN(item.length, 4096);
+ data = mmap(0, len, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ memset(data, 0, len);
+ ((struct drm_i915_query_perf_config *)data)->config = 1; /* test config */
+ mprotect(data, len, PROT_READ);
+ item.data_ptr = to_user_pointer(data); /* invalid with read only data */
+ i915_query_items(fd, &item, 1);
+ igt_assert_eq(item.length, -EFAULT);
+
+ munmap(data, len);
+
+ /* Invalid memory (NULL) for configuration registers. */
+ memset(&query, 0, sizeof(query));
+ query.query.config = 1; /* test config */
+ igt_assert_eq(sizeof(query),
+ query_perf_config_id_data(fd, sizeof(query), &query.query));
+
+ igt_debug("Queried test config %.*s\n",
+ (int)sizeof(query.oa.uuid), query.oa.uuid);
+ igt_debug(" n_mux_regs=%u, n_boolean_regs=%u, n_flex_regs=%u\n",
+ query.oa.n_mux_regs, query.oa.n_boolean_regs,
+ query.oa.n_flex_regs);
+ /* Second call copies registers through the still-NULL pointers. */
+ igt_assert_eq(-EFAULT,
+ query_perf_config_id_data(fd, sizeof(query), &query.query));
+
+ /* Invalid memory (ULONG max) for configuration registers. */
+ memset(&query, 0, sizeof(query));
+ query.query.config = 1; /* test config */
+ igt_assert_eq(sizeof(query), query_perf_config_id_data(fd, 0, &query.query));
+
+ if (query.oa.n_mux_regs > 0) {
+ query.oa.mux_regs_ptr = ULONG_MAX;
+ query.oa.n_boolean_regs = 0;
+ query.oa.n_flex_regs = 0;
+ igt_assert_eq(-EFAULT, query_perf_config_id_data(fd, sizeof(query),
+ &query.query));
+ }
+
+ memset(&query, 0, sizeof(query));
+ query.query.config = 1; /* test config */
+ igt_assert_eq(sizeof(query),
+ query_perf_config_id_data(fd, 0, &query.query));
+
+ if (query.oa.n_boolean_regs > 0) {
+ query.oa.boolean_regs_ptr = ULONG_MAX;
+ query.oa.n_mux_regs = 0;
+ query.oa.n_flex_regs = 0;
+ igt_assert_eq(-EFAULT, query_perf_config_id_data(fd, sizeof(query),
+ &query.query));
+ }
+
+ memset(&query, 0, sizeof(query));
+ query.query.config = 1; /* test config */
+ igt_assert_eq(sizeof(query),
+ query_perf_config_id_data(fd, 0, &query.query));
+
+ if (query.oa.n_flex_regs > 0) {
+ query.oa.flex_regs_ptr = ULONG_MAX;
+ query.oa.n_mux_regs = 0;
+ query.oa.n_boolean_regs = 0;
+ igt_assert_eq(-EFAULT, query_perf_config_id_data(fd, sizeof(query),
+ &query.query));
+ }
+
+ /* Too small number of registers to write. */
+ memset(&query, 0, sizeof(query));
+ query.query.config = 1; /* test config */
+ igt_assert_eq(sizeof(query), query_perf_config_id_data(fd, 0, &query.query));
+
+ if (query.oa.n_mux_regs > 0) {
+ query.oa.n_mux_regs--;
+ igt_assert_eq(-EINVAL, query_perf_config_id_data(fd, sizeof(query),
+ &query.query));
+ }
+
+ memset(&query, 0, sizeof(query));
+ query.query.config = 1; /* test config */
+ igt_assert_eq(sizeof(query),
+ query_perf_config_id_data(fd, 0, &query.query));
+
+ if (query.oa.n_boolean_regs > 0) {
+ query.oa.n_boolean_regs--;
+ igt_assert_eq(-EINVAL, query_perf_config_id_data(fd, sizeof(query),
+ &query.query));
+ }
+
+ memset(&query, 0, sizeof(query));
+ query.query.config = 1; /* test config */
+ igt_assert_eq(sizeof(query), query_perf_config_id_data(fd, 0, &query.query));
+
+ if (query.oa.n_flex_regs > 0) {
+ query.oa.n_flex_regs--;
+ igt_assert_eq(-EINVAL, query_perf_config_id_data(fd, sizeof(query),
+ &query.query));
+ }
+
+ /*
+ * NOTE(review): this stanza duplicates the boolean_regs ULONG_MAX
+ * check above — presumably intentional re-verification; confirm.
+ */
+ memset(&query, 0, sizeof(query));
+ query.query.config = 1; /* test config */
+ igt_assert_eq(sizeof(query),
+ query_perf_config_id_data(fd, 0, &query.query));
+
+ if (query.oa.n_boolean_regs > 0) {
+ query.oa.boolean_regs_ptr = ULONG_MAX;
+ query.oa.n_mux_regs = 0;
+ query.oa.n_flex_regs = 0;
+ igt_assert_eq(-EFAULT, query_perf_config_id_data(fd, sizeof(query),
+ &query.query));
+ }
+
+ /* Read only memory for registers. */
+ memset(&query, 0, sizeof(query));
+ query.query.config = 1; /* test config */
+ igt_assert_eq(sizeof(query),
+ query_perf_config_id_data(fd, sizeof(query), &query.query));
+
+ /* Assumes the test config has n_mux_regs > 0 — TODO confirm. */
+ len = ALIGN(query.oa.n_mux_regs * sizeof(uint32_t) * 2, 4096);
+ data = mmap(0, len, PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ memset(data, 0, len);
+ mprotect(data, len, PROT_READ);
+ query.oa.mux_regs_ptr = to_user_pointer(data);
+ igt_assert_eq(-EFAULT,
+ query_perf_config_id_data(fd, sizeof(query), &query.query));
+
+ munmap(data, len);
+}
+
+/*
+ * Create a new OA perf config named @uuid out of randomly generated
+ * boolean/mux/flex register lists, via DRM_IOCTL_I915_PERF_ADD_CONFIG.
+ *
+ * The generated register arrays (address/value dword pairs) are
+ * returned through the out parameters; the caller owns and frees
+ * them. Register counts are random in [0, 49]. Returns the new
+ * config id reported by the ioctl.
+ */
+static uint64_t create_perf_config(int fd,
+ const char *uuid,
+ uint32_t **boolean_regs,
+ uint32_t *n_boolean_regs,
+ uint32_t **flex_regs,
+ uint32_t *n_flex_regs,
+ uint32_t **mux_regs,
+ uint32_t *n_mux_regs)
+{
+ struct drm_i915_perf_oa_config config;
+ int devid = intel_get_drm_devid(fd);
+ int i, ret;
+
+ /* NOTE(review): a count of 0 makes calloc() return NULL or a
+ * zero-size block; callers must tolerate empty register lists. */
+ *n_boolean_regs = rand() % 50;
+ *boolean_regs = calloc(*n_boolean_regs, sizeof(uint32_t) * 2);
+ *n_mux_regs = rand() % 50;
+ *mux_regs = calloc(*n_mux_regs, sizeof(uint32_t) * 2);
+ if (intel_gen(devid) < 8) {
+ /* flex registers don't exist on gen7 */
+ *n_flex_regs = 0;
+ *flex_regs = NULL;
+ } else {
+ *n_flex_regs = rand() % 50;
+ *flex_regs = calloc(*n_flex_regs, sizeof(uint32_t) * 2);
+ }
+
+ /* Random values in registers the kernel whitelists for boolean
+ * counter programming. */
+ for (i = 0; i < *n_boolean_regs; i++) {
+ if (rand() % 2) {
+ /* OASTARTTRIG[1-8] */
+ (*boolean_regs)[i * 2] =
+ 0x2710 + ((rand() % (0x2730 - 0x2710)) / 4) * 4;
+ (*boolean_regs)[i * 2 + 1] = rand();
+ } else {
+ /* OAREPORTTRIG[1-8] */
+ (*boolean_regs)[i * 2] =
+ 0x2740 + ((rand() % (0x275c - 0x2744)) / 4) * 4;
+ (*boolean_regs)[i * 2 + 1] = rand();
+ }
+ }
+
+ for (i = 0; i < *n_mux_regs; i++) {
+ (*mux_regs)[i * 2] = 0x9800;
+ (*mux_regs)[i * 2 + 1] = rand();
+ }
+
+ for (i = 0; i < *n_flex_regs; i++) {
+ const uint32_t flex[] = {
+ 0xe458,
+ 0xe558,
+ 0xe658,
+ 0xe758,
+ 0xe45c,
+ 0xe55c,
+ 0xe65c
+ };
+ (*flex_regs)[i * 2] = flex[rand() % ARRAY_SIZE(flex)];
+ (*flex_regs)[i * 2 + 1] = rand();
+ }
+
+ memset(&config, 0, sizeof(config));
+ memcpy(config.uuid, uuid, sizeof(config.uuid));
+
+ config.n_boolean_regs = *n_boolean_regs;
+ config.boolean_regs_ptr = to_user_pointer(*boolean_regs);
+ config.n_flex_regs = *n_flex_regs;
+ config.flex_regs_ptr = to_user_pointer(*flex_regs);
+ config.n_mux_regs = *n_mux_regs;
+ config.mux_regs_ptr = to_user_pointer(*mux_regs);
+
+ /* The ioctl returns the id of the newly created config. */
+ ret = igt_ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
+ igt_assert(ret > 1); /* Config 0/1 should be used by the kernel */
+
+ igt_debug("created config id=%i uuid=%s:\n", ret, uuid);
+ igt_debug("\tn_boolean_regs=%u n_flex_regs=%u n_mux_regs=%u\n",
+ config.n_boolean_regs, config.n_flex_regs,
+ config.n_mux_regs);
+
+ return ret;
+}
+
+static void remove_perf_config(int fd, uint64_t config_id)
+{
+ igt_assert_eq(0, igt_ioctl(fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
+ &config_id));
+}
+
/*
 * Look up the id of the perf metric config named @uuid through sysfs
 * (metrics/<uuid>/id). Returns 0 when the config does not exist.
 */
static uint64_t get_config_id(int fd, const char *uuid)
{
	char rel_path[100];
	unsigned long long val;
	uint64_t ret = 0;
	int sysfs;

	sysfs = igt_sysfs_open(fd);
	igt_assert_lte(0, sysfs);

	snprintf(rel_path, sizeof(rel_path), "metrics/%s/id", uuid);

	/*
	 * scanf returns the number of matched items: checking "< 0"
	 * missed the matched-0 case and "%lu" into a uint64_t is wrong
	 * on 32-bit builds; scan into an unsigned long long instead
	 * and require exactly one conversion.
	 */
	if (igt_sysfs_scanf(sysfs, rel_path, "%llu", &val) == 1)
		ret = val;

	close(sysfs);
	return ret;
}
+
+/*
+ * Verifies that created configurations appear in the query of list of
+ * configuration and also verify the content of the queried
+ * configurations matches with what was created.
+ */
+static void test_query_perf_configs(int fd)
+{
+ struct {
+ uint64_t id;
+
+ char uuid[40];
+
+ uint32_t *boolean_regs;
+ uint32_t n_boolean_regs;
+ uint32_t *flex_regs;
+ uint32_t n_flex_regs;
+ uint32_t *mux_regs;
+ uint32_t n_mux_regs;
+ } configs[5];
+ struct {
+ struct drm_i915_query_perf_config query;
+ uint64_t config_ids[];
+ } *list_query;
+ struct drm_i915_query_item item;
+ int i;
+
+ srand(time(NULL));
+
+ for (i = 0; i < ARRAY_SIZE(configs); i++) {
+ uint64_t prev_config_id;
+
+ snprintf(configs[i].uuid, sizeof(configs[i].uuid),
+ "01234567-%04u-0123-0123-0123456789ab", i);
+
+ prev_config_id = get_config_id(fd, configs[i].uuid);
+ if (prev_config_id)
+ remove_perf_config(fd, prev_config_id);
+
+ configs[i].id =
+ create_perf_config(fd, configs[i].uuid,
+ &configs[i].boolean_regs,
+ &configs[i].n_boolean_regs,
+ &configs[i].flex_regs,
+ &configs[i].n_flex_regs,
+ &configs[i].mux_regs,
+ &configs[i].n_mux_regs);
+ }
+
+ memset(&item, 0, sizeof(item));
+ item.query_id = DRM_I915_QUERY_PERF_CONFIG;
+ item.flags = DRM_I915_QUERY_PERF_CONFIG_LIST;
+ item.length = 0;
+ i915_query_items(fd, &item, 1);
+ igt_assert(item.length > sizeof(struct drm_i915_query_perf_config));
+
+ list_query = malloc(item.length);
+ memset(list_query, 0, item.length);
+ item.data_ptr = to_user_pointer(list_query);
+ i915_query_items(fd, &item, 1);
+ igt_assert(item.length > sizeof(struct drm_i915_query_perf_config));
+
+ igt_debug("listed configs:\n");
+ for (i = 0; i < list_query->query.config; i++)
+ igt_debug("\tid=%lu\n", list_query->config_ids[i]);
+
+ /* Verify that all created configs are listed. */
+ for (i = 0; i < ARRAY_SIZE(configs); i++) {
+ int j;
+ bool found = false;
+
+ for (j = 0; j < list_query->query.config; j++) {
+ if (list_query->config_ids[j] == configs[i].id) {
+ found = true;
+ break;
+ }
+ }
+
+ igt_assert(found);
+ }
+
+ /* Verify the content of the configs. */
+ for (i = 0; i < ARRAY_SIZE(configs); i++) {
+ struct {
+ struct drm_i915_query_perf_config query;
+ struct drm_i915_perf_oa_config oa;
+ } query;
+ uint32_t *boolean_regs = NULL, *flex_regs = NULL, *mux_regs = NULL;
+
+ /* First query with configuration id. */
+ memset(&query, 0, sizeof(query));
+ query.query.config = configs[i].id;
+ igt_assert_eq(sizeof(query),
+ query_perf_config_id_data(fd, sizeof(query),
+ &query.query));
+
+ igt_debug("queried config data id=%lu uuid=%s:\n",
+ configs[i].id, configs[i].uuid);
+ igt_debug("\tn_boolean_regs=%u n_flex_regs=%u n_mux_regs=%u\n",
+ query.oa.n_boolean_regs, query.oa.n_flex_regs,
+ query.oa.n_mux_regs);
+
+ igt_assert_eq(query.oa.n_boolean_regs, configs[i].n_boolean_regs);
+ igt_assert_eq(query.oa.n_flex_regs, configs[i].n_flex_regs);
+ igt_assert_eq(query.oa.n_mux_regs, configs[i].n_mux_regs);
+
+ /* Query again with configuration uuid. */
+ memset(&query, 0, sizeof(query));
+ memcpy(query.query.uuid, configs[i].uuid,
+ sizeof(query.query.uuid));
+ igt_assert_eq(sizeof(query),
+ query_perf_config_uuid_data(fd, sizeof(query),
+ &query.query));
+
+ igt_assert_eq(query.oa.n_boolean_regs, configs[i].n_boolean_regs);
+ igt_assert_eq(query.oa.n_flex_regs, configs[i].n_flex_regs);
+ igt_assert_eq(query.oa.n_mux_regs, configs[i].n_mux_regs);
+
+ /* Now get the register programming values. */
+ boolean_regs = calloc(query.oa.n_boolean_regs * 2, sizeof(uint32_t));
+ if (query.oa.n_flex_regs > 0)
+ flex_regs = calloc(query.oa.n_flex_regs * 2, sizeof(uint32_t));
+ mux_regs = calloc(query.oa.n_mux_regs * 2, sizeof(uint32_t));
+
+ query.oa.boolean_regs_ptr = to_user_pointer(boolean_regs);
+ query.oa.flex_regs_ptr = to_user_pointer(flex_regs);
+ query.oa.mux_regs_ptr = to_user_pointer(mux_regs);
+
+ igt_assert_eq(sizeof(query),
+ query_perf_config_uuid_data(fd, sizeof(query),
+ &query.query));
+
+ igt_assert_eq(0, memcmp(configs[i].boolean_regs,
+ boolean_regs,
+ configs[i].n_boolean_regs * 2 * sizeof(uint32_t)));
+ igt_assert_eq(0, memcmp(configs[i].flex_regs,
+ flex_regs,
+ configs[i].n_flex_regs * 2 * sizeof(uint32_t)));
+ igt_assert_eq(0, memcmp(configs[i].mux_regs,
+ mux_regs,
+ configs[i].n_mux_regs * 2 * sizeof(uint32_t)));
+
+ free(boolean_regs);
+ free(flex_regs);
+ free(mux_regs);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(configs); i++) {
+ remove_perf_config(fd, configs[i].id);
+
+ free(configs[i].boolean_regs);
+ free(configs[i].flex_regs);
+ free(configs[i].mux_regs);
+ }
+}
+
igt_main
{
int fd = -1;
@@ -777,6 +1370,21 @@ igt_main
engines(fd);
}
+ igt_subtest_group {
+ igt_fixture {
+ igt_require(query_perf_config_supported(fd));
+ }
+
+ igt_subtest("query-perf-config-list-invalid")
+ test_query_perf_config_list_invalid(fd);
+
+ igt_subtest("query-perf-config-data-invalid")
+ test_query_perf_config_data_invalid(fd);
+
+ igt_subtest("query-perf-configs")
+ test_query_perf_configs(fd);
+ }
+
igt_fixture {
close(fd);
}
--
2.22.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 21+ messages in thread* [igt-dev] [PATCH i-g-t 03/10] tests/perf: add tests for holding preemption
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 01/10] include: temporary import of headers Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 02/10] tests/i915-query: add new tests for perf configurations queries Lionel Landwerlin
@ 2019-07-25 10:30 ` Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 04/10] tests/perf: add tests to new OA reconfiguration execbuf extension Lionel Landwerlin
` (9 subsequent siblings)
12 siblings, 0 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:30 UTC (permalink / raw)
To: igt-dev
Using timestamps from the engine, verify that when a context has been
flagged as holding preemption it will not be preempted by another
context of higher priority.
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
lib/intel_reg.h | 1 +
tests/perf.c | 556 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 557 insertions(+)
diff --git a/lib/intel_reg.h b/lib/intel_reg.h
index 069440cb..1526ff32 100644
--- a/lib/intel_reg.h
+++ b/lib/intel_reg.h
@@ -2596,6 +2596,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1 << 8)
#define MI_BATCH_NON_SECURE_HSW (1<<13) /* Additional bit for RCS */
+#define MI_BATCH_PREDICATE_ENABLE_HSW (1<<15) /* RCS only */
#define MAX_DISPLAY_PIPES 2
diff --git a/tests/perf.c b/tests/perf.c
index 5ad8b2db..3bbc898a 100644
--- a/tests/perf.c
+++ b/tests/perf.c
@@ -82,6 +82,86 @@ IGT_TEST_DESCRIPTION("Test the i915 perf metrics streaming interface");
#define PIPE_CONTROL_PPGTT_WRITE (0 << 2)
#define PIPE_CONTROL_GLOBAL_GTT_WRITE (1 << 2)
+#define MI_LOAD_REGISTER_REG (0x2a << 23)
+#define MI_LOAD_REGISTER_IMM_n(n_regs) ((0x22 << 23) | (1 + 2 * (n_regs) - 2))
+#define MI_LOAD_REGISTER_MEM (0x29 << 23)
+#define MI_STORE_REGISTER_MEM (0x24 << 23)
+#define MI_STORE_DATA_IMM (0x20 << 23)
+#define MI_CONDITIONAL_BATCH_BUFFER_END (0x36 << 23)
+
+#define MI_MATH(op_len) ((0x1a << 23) | (1 + (op_len) - 2))
+#define MI_ALU_INSTR(opcode, src1, src2) \
+ ((opcode << 20) | (src1 << 10) | (src2))
+
+#define MI_ALU_OPCODE_NOOP 0
+#define MI_ALU_OPCODE_LOAD 128
+#define MI_ALU_OPCODE_LOADINV 1152
+#define MI_ALU_OPCODE_LOAD0 129
+#define MI_ALU_OPCODE_LOAD1 1153
+#define MI_ALU_OPCODE_ADD 256
+#define MI_ALU_OPCODE_SUB 257
+#define MI_ALU_OPCODE_AND 258
+#define MI_ALU_OPCODE_OR 259
+#define MI_ALU_OPCODE_XOR 260
+#define MI_ALU_OPCODE_STORE 384
+#define MI_ALU_OPCODE_STOREINV 1408
+
+#define MI_ALU_OPERAND_REG0 0
+#define MI_ALU_OPERAND_REG1 1
+#define MI_ALU_OPERAND_REG2 2
+#define MI_ALU_OPERAND_REG3 3
+#define MI_ALU_OPERAND_REG4 4
+#define MI_ALU_OPERAND_REG5 5
+#define MI_ALU_OPERAND_REG6 6
+#define MI_ALU_OPERAND_REG7 7
+#define MI_ALU_OPERAND_REG8 8
+#define MI_ALU_OPERAND_REG9 9
+#define MI_ALU_OPERAND_REG10 10
+#define MI_ALU_OPERAND_REG11 11
+#define MI_ALU_OPERAND_REG12 12
+#define MI_ALU_OPERAND_REG13 13
+#define MI_ALU_OPERAND_REG14 14
+#define MI_ALU_OPERAND_REG15 15
+#define MI_ALU_OPERAND_SRCA 32
+#define MI_ALU_OPERAND_SRCB 33
+#define MI_ALU_OPERAND_ACCU 49
+#define MI_ALU_OPERAND_ZF 50
+#define MI_ALU_OPERAND_CF 51
+
+#define MI_PREDICATE (0xC << 23)
+#define MI_PREDICATE_LOADOP_KEEP (0 << 6)
+#define MI_PREDICATE_LOADOP_LOAD (2 << 6)
+#define MI_PREDICATE_LOADOP_LOADINV (3 << 6)
+#define MI_PREDICATE_COMBINEOP_SET (0 << 3)
+#define MI_PREDICATE_COMBINEOP_AND (1 << 3)
+#define MI_PREDICATE_COMBINEOP_OR (2 << 3)
+#define MI_PREDICATE_COMBINEOP_XOR (3 << 3)
+#define MI_PREDICATE_COMPAREOP_TRUE (0 << 0)
+#define MI_PREDICATE_COMPAREOP_FALSE (1 << 0)
+#define MI_PREDICATE_COMPAREOP_SRCS_EQUAL (2 << 0)
+#define MI_PREDICATE_COMPAREOP_DELTAS_EQUAL (3 << 0)
+
+#define MI_SET_PREDICATE (0x1 << 23)
+#define MI_SET_PREDICATE_NOOP_NEVER (0)
+#define MI_SET_PREDICATE_NOOP_RESULT2_CLEAR (1)
+#define MI_SET_PREDICATE_NOOP_RESULT2_SET (2)
+#define MI_SET_PREDICATE_NOOP_RESULT_CLEAR (3)
+#define MI_SET_PREDICATE_NOOP_RESULT_SET (4)
+#define MI_SET_PREDICATE_1_SLICES (5)
+#define MI_SET_PREDICATE_2_SLICES (6)
+#define MI_SET_PREDICATE_3_SLICES (7)
+
+#define MI_ARB_CHK (0x5 << 23)
+
+#define CS_GPR(n) (0x2600 + (n) * 8)
+#define RCS_TIMESTAMP (0x2000 + 0x358)
+#define MI_PREDICATE_SRC0 0x2400
+#define MI_PREDICATE_SRC1 0x2408
+#define MI_PREDICATE_DATA 0x2410
+#define MI_PREDICATE_RESULT 0x2418
+#define MI_PREDICATE_RESULT_1 0x241C
+#define MI_PREDICATE_RESULT_2 0x2214
+
#define MAX_OA_BUF_SIZE (16 * 1024 * 1024)
struct accumulator {
@@ -367,6 +447,12 @@ timebase_scale(uint32_t u32_delta)
return ((uint64_t)u32_delta * NSEC_PER_SEC) / timestamp_frequency;
}
+static uint64_t
+time_to_gpu_ticks(uint64_t ns)
+{
+ return (ns * timestamp_frequency) / NSEC_PER_SEC;
+}
+
/* Returns: the largest OA exponent that will still result in a sampling period
* less than or equal to the given @period.
*/
@@ -3952,6 +4038,461 @@ test_whitelisted_registers_userspace_config(void)
i915_perf_remove_config(drm_fd, config_id);
}
+static bool has_i915_perf_disable_preemption_support(int fd)
+{
+ struct drm_i915_getparam gp;
+ int perf_version = -1;
+
+ memset(&gp, 0, sizeof(gp));
+ gp.param = I915_PARAM_PERF_REVISION;
+ gp.value = &perf_version;
+ igt_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+
+ return perf_version >= 2;
+}
+
+static uint32_t *
+fill_relocation(uint32_t *batch,
+ struct drm_i915_gem_relocation_entry *reloc,
+ uint32_t gem_handle, uint32_t delta, /* in bytes */
+ uint32_t offset, /* in dwords */
+ uint32_t read_domains, uint32_t write_domains)
+{
+ reloc->target_handle = gem_handle;
+ reloc->delta = delta;
+ reloc->offset = offset * sizeof(uint32_t);
+ reloc->presumed_offset = 0;
+ reloc->read_domains = read_domains;
+ reloc->write_domain = write_domains;
+
+ *batch++ = delta;
+ *batch++ = 0;
+
+ return batch;
+}
+
+/*
+ * Submit a GPU busy loop of roughly @duration_ns on @context. The
+ * loop is built from a self-chaining batch that uses MI_MATH to
+ * compute elapsed = RCS_TIMESTAMP - start and re-runs itself until
+ * the requested number of ticks has passed, at which point a
+ * predicated MI_BATCH_BUFFER_START jumps to a terminating batch.
+ *
+ * The data buffer (obj[0]) records, per dword:
+ *   0: start timestamp (GPR0)    1: last loop timestamp (GPR1)
+ *   2: duration in ticks (GPR2)  3: increment, always 1 (GPR3)
+ *   4: iteration count (GPR4)    5: elapsed ticks (GPR5)
+ *   6: scratch (GPR6)            7: carry of duration-elapsed (GPR7)
+ *
+ * When @perf_fd >= 0 the submission carries the
+ * DRM_I915_GEM_EXECBUFFER_EXT_PERF extension referencing @perf_fd and
+ * test_metric_set_id. Returns the GEM handle of the data buffer,
+ * which the caller owns and must close.
+ */
+static uint32_t
+busy_loop(uint32_t context, uint32_t duration_ns, int perf_fd)
+{
+ struct drm_i915_gem_execbuffer_ext_perf execbuf_perf;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 obj[4];
+ struct drm_i915_gem_relocation_entry start_relocs[1];
+ struct drm_i915_gem_relocation_entry chain_relocs[20];
+ uint32_t *batch, *b;
+ uint32_t data_handle;
+ int i, n_relocs;
+
+ memset(obj, 0, sizeof(obj));
+ obj[0].handle = data_handle = gem_create(drm_fd, 4096); /* data */
+ obj[1].handle = gem_create(drm_fd, 4096 * 10); /* loop batch */
+ obj[2].handle = gem_create(drm_fd, 4096); /* end batch */
+ obj[3].handle = gem_create(drm_fd, 4096); /* start batch */
+
+ /* Zero the data buffer so the stored values start clean. */
+ batch = gem_mmap__cpu(drm_fd, obj[0].handle, 0, 4096,
+ PROT_READ | PROT_WRITE);
+ memset(batch, 0, 4096);
+ gem_munmap(batch, 4096);
+
+ /* start batch: snapshot the start timestamp, jump to the loop */
+ n_relocs = 0;
+ batch = b = gem_mmap__cpu(drm_fd, obj[3].handle, 0, 4096,
+ PROT_READ | PROT_WRITE);
+
+ /* GPR0 = start timestamp */
+ *b++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *b++ = RCS_TIMESTAMP;
+ *b++ = CS_GPR(0);
+
+ *b++ = MI_BATCH_BUFFER_START | (1 << 8) | (3 - 2);
+ b = fill_relocation(b, &start_relocs[n_relocs++], obj[1].handle,
+ 0, b - batch,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION);
+
+ *b++ = MI_NOOP;
+ *b++ = MI_BATCH_BUFFER_END;
+
+ gem_munmap(batch, 4096);
+
+ obj[3].relocation_count = n_relocs;
+ obj[3].relocs_ptr = to_user_pointer(start_relocs);
+
+ /* end batch: terminates the chain */
+ n_relocs = 0;
+ batch = b = gem_mmap__cpu(drm_fd, obj[2].handle, 0, 4096,
+ PROT_READ | PROT_WRITE);
+
+ *b++ = MI_BATCH_BUFFER_END;
+ *b++ = MI_NOOP;
+
+ gem_munmap(batch, 4096);
+
+ /* loop batch */
+ n_relocs = 0;
+ batch = b = gem_mmap__cpu(drm_fd, obj[1].handle, 0, 4096 * 10,
+ PROT_READ | PROT_WRITE);
+
+ /* GPR1 = current timestamp */
+ *b++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *b++ = RCS_TIMESTAMP;
+ *b++ = CS_GPR(1);
+
+ /* GPR2 = requested duration in ticks */
+ *b++ = MI_LOAD_REGISTER_IMM_n(1);
+ *b++ = CS_GPR(2);
+ *b++ = time_to_gpu_ticks(duration_ns);
+
+ /* GPR3 = 1, the per-iteration increment */
+ *b++ = MI_LOAD_REGISTER_IMM_n(1);
+ *b++ = CS_GPR(3);
+ *b++ = 1;
+
+ /* GPR4 = iteration count persisted in the data buffer (dword 4) */
+ *b++ = MI_LOAD_REGISTER_MEM | (4 - 2);
+ *b++ = CS_GPR(4);
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[0].handle,
+ 16, b - batch,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+ /* GPR5 = GPR1 - GPR0 (elapsed ticks since the start batch) */
+ *b++ = MI_MATH(4);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_LOAD,
+ MI_ALU_OPERAND_SRCA,
+ MI_ALU_OPERAND_REG1);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_LOAD,
+ MI_ALU_OPERAND_SRCB,
+ MI_ALU_OPERAND_REG0);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_SUB,
+ MI_ALU_OPERAND_SRCA,
+ MI_ALU_OPERAND_SRCB);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_STORE,
+ MI_ALU_OPERAND_REG5,
+ MI_ALU_OPERAND_ACCU);
+
+ /* GPR6 = GPR2 - GPR5, GPR7 = carry (set once elapsed > duration) */
+ *b++ = MI_MATH(5);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_LOAD,
+ MI_ALU_OPERAND_SRCA,
+ MI_ALU_OPERAND_REG2);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_LOAD,
+ MI_ALU_OPERAND_SRCB,
+ MI_ALU_OPERAND_REG5);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_SUB,
+ MI_ALU_OPERAND_SRCA,
+ MI_ALU_OPERAND_SRCB);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_STORE,
+ MI_ALU_OPERAND_REG6,
+ MI_ALU_OPERAND_ACCU);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_STORE,
+ MI_ALU_OPERAND_REG7,
+ MI_ALU_OPERAND_CF);
+
+ /* GPR4 += GPR3 (bump the iteration counter) */
+ *b++ = MI_MATH(4);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_LOAD,
+ MI_ALU_OPERAND_SRCA,
+ MI_ALU_OPERAND_REG3);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_LOAD,
+ MI_ALU_OPERAND_SRCB,
+ MI_ALU_OPERAND_REG4);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_ADD,
+ MI_ALU_OPERAND_SRCA,
+ MI_ALU_OPERAND_SRCB);
+ *b++ = MI_ALU_INSTR(MI_ALU_OPCODE_STORE,
+ MI_ALU_OPERAND_REG4,
+ MI_ALU_OPERAND_ACCU);
+
+
+ /* Dump GPR0-5 and GPR7 into the data buffer for the CPU to read. */
+ *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
+ *b++ = CS_GPR(0);
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[0].handle,
+ 0, b - batch,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+ *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
+ *b++ = CS_GPR(1);
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[0].handle,
+ 4, b - batch,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+ *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
+ *b++ = CS_GPR(2);
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[0].handle,
+ 8, b - batch,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+ *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
+ *b++ = CS_GPR(3);
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[0].handle,
+ 12, b - batch,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+ *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
+ *b++ = CS_GPR(4);
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[0].handle,
+ 16, b - batch,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+ *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
+ *b++ = CS_GPR(5);
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[0].handle,
+ 20, b - batch,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+ *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
+ *b++ = CS_GPR(7);
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[0].handle,
+ 28, b - batch,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+ igt_debug("busy delay: ns=%u ticks=%lu\n",
+ duration_ns, time_to_gpu_ticks(duration_ns));
+
+ /*
+ * Predicate the jump to the end batch on GPR7, i.e. the carry of
+ * duration - elapsed: once the requested time has passed we leave
+ * the loop. NOTE(review): relies on MI_PREDICATE_RESULT_1 gating
+ * MI_BATCH_PREDICATE_ENABLE_HSW semantics — confirm per gen.
+ */
+ *b++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *b++ = CS_GPR(7);
+ *b++ = MI_PREDICATE_RESULT_1;
+
+ *b++ = MI_BATCH_BUFFER_START |
+ (1 << 8) | 1 |
+ MI_BATCH_PREDICATE_ENABLE_HSW;
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[2].handle,
+ 0, b - batch,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION);
+
+ *b++ = MI_ARB_CHK; /* Give some ability to preempt */
+ for (i = 0; i < 1000; i++)
+ *b++ = MI_NOOP;
+
+ *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
+ *b++ = CS_GPR(6);
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[0].handle,
+ 24, b - batch,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+ /* Not done yet: jump back to the top of this loop batch. */
+ *b++ = MI_BATCH_BUFFER_START |
+ (1 << 8) | 1;
+ b = fill_relocation(b, &chain_relocs[n_relocs++], obj[1].handle,
+ 0, b - batch,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION);
+ *b++ = MI_NOOP;
+ *b++ = MI_BATCH_BUFFER_END;
+ *b++ = MI_NOOP;
+
+ gem_munmap(batch, 4096 * 10);
+
+ obj[1].relocation_count = n_relocs;
+ obj[1].relocs_ptr = to_user_pointer(chain_relocs);
+
+ /*
+ * Now submit with the submission flagged as containing perf
+ * queries if perf_fd != -1.
+ */
+ memset(&execbuf_perf, 0, sizeof(execbuf_perf));
+ execbuf_perf.base.name = DRM_I915_GEM_EXECBUFFER_EXT_PERF;
+ execbuf_perf.perf_fd = perf_fd;
+ execbuf_perf.oa_config = test_metric_set_id;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(obj);
+ execbuf.buffer_count = ARRAY_SIZE(obj);
+ if (perf_fd >= 0) {
+ execbuf.flags |= I915_EXEC_EXT;
+ execbuf.cliprects_ptr = to_user_pointer(&execbuf_perf);
+ }
+ i915_execbuffer2_set_context_id(execbuf, context);
+
+ gem_execbuf(drm_fd, &execbuf);
+
+ for (i = 1 /* skip data_handle */; i < ARRAY_SIZE(obj); i++)
+ gem_close(drm_fd, obj[i].handle);
+
+ return data_handle;
+}
+
+#define NSECS_PER_SEC (1000000000ull)
+
+/*
+ * Verify that preemption is put on hold for the context we filter
+ * with when the perf stream is opened with the
+ * DRM_I915_PERF_PROP_HOLD_PREEMPTION property.
+ */
+static void
+test_single_ctx_counters_disabled_preemption(void)
+{
+ uint64_t properties[] = {
+ DRM_I915_PERF_PROP_CTX_HANDLE, UINT64_MAX, /* updated below */
+ DRM_I915_PERF_PROP_SAMPLE_OA, true,
+ DRM_I915_PERF_PROP_HOLD_PREEMPTION, true,
+
+ /* OA unit configuration */
+ DRM_I915_PERF_PROP_OA_METRICS_SET, test_metric_set_id,
+ DRM_I915_PERF_PROP_OA_FORMAT, test_oa_format,
+ DRM_I915_PERF_PROP_OA_EXPONENT, max_oa_exponent_for_period_lte(1000000),
+ };
+ struct drm_i915_perf_open_param param = {
+ .flags = I915_PERF_FLAG_FD_CLOEXEC,
+ .num_properties = sizeof(properties) / 16,
+ .properties_ptr = to_user_pointer(properties),
+ };
+ uint32_t perf_context = gem_context_create(drm_fd);
+ uint32_t preempt_context = gem_context_create(drm_fd);
+ uint32_t perf_data_handle, preempt_data_handle;
+ uint32_t perf_data[10], preempt_data[10];
+ int i, perf_fd, retries = 0;
+ bool timestamp_loop = false;
+
+ gem_context_set_priority(drm_fd, perf_context,
+ I915_CONTEXT_MIN_USER_PRIORITY);
+ gem_context_set_priority(drm_fd, preempt_context,
+ I915_CONTEXT_MAX_USER_PRIORITY);
+
+ do {
+ /*
+ * First run without perf enabled, preemption should
+ * happen.
+ */
+ perf_data_handle = busy_loop(perf_context,
+ 1ULL * NSECS_PER_SEC, -1);
+
+ /* Wait 500ms before kicking off another busy loop. */
+ usleep(500000);
+ preempt_data_handle = busy_loop(preempt_context, 1ULL * 1000, -1);
+
+ gem_read(drm_fd, perf_data_handle, 0,
+ perf_data, sizeof(perf_data));
+ gem_read(drm_fd, preempt_data_handle, 0,
+ preempt_data, sizeof(preempt_data));
+
+ for (i = 0; i < ARRAY_SIZE(perf_data); i++)
+ igt_debug("perf val%i=0x%x\n", i, perf_data[i]);
+ for (i = 0; i < ARRAY_SIZE(preempt_data); i++)
+ igt_debug("preempt_perf val%i=0x%x\n", i, preempt_data[i]);
+
+ gem_close(drm_fd, perf_data_handle);
+
+ timestamp_loop = perf_data[1] < perf_data[0];
+ if (timestamp_loop) {
+ igt_assert(retries < 2);
+ retries++;
+ } else {
+ igt_assert_lte(perf_data[0], preempt_data[0]);
+ igt_assert_lte(perf_data[0], preempt_data[1]);
+ igt_assert_lte(preempt_data[0], perf_data[1]);
+ igt_assert_lte(preempt_data[1], perf_data[1]);
+ }
+ } while (timestamp_loop);
+
+ /*
+ * Now run with perf enabled, preemption shouldn't happen.
+ */
+
+ properties[1] = perf_context;
+ perf_fd = __perf_open(drm_fd, &param, false);
+
+ do {
+ perf_data_handle = busy_loop(perf_context,
+ 1ULL * 1000 * 1000 * 1000, perf_fd);
+
+ /* Wait 500ms before kicking off another busy loop. */
+ usleep(500000);
+ preempt_data_handle = busy_loop(preempt_context, 1ULL * 1000, -1);
+
+ gem_read(drm_fd, perf_data_handle, 0, perf_data, sizeof(perf_data));
+ gem_read(drm_fd, preempt_data_handle, 0,
+ preempt_data, sizeof(preempt_data));
+
+ for (i = 0; i < ARRAY_SIZE(perf_data); i++)
+ igt_debug("perf val%i=0x%x\n", i, perf_data[i]);
+ for (i = 0; i < ARRAY_SIZE(preempt_data); i++)
+ igt_debug("preempt_perf val%i=0x%x\n", i, preempt_data[i]);
+
+ gem_close(drm_fd, perf_data_handle);
+
+ timestamp_loop = preempt_data[1] < perf_data[0];
+ if (timestamp_loop) {
+ igt_assert(retries < 2);
+ retries++;
+ } else {
+ igt_assert_lte(perf_data[0], preempt_data[0]);
+ igt_assert_lte(perf_data[0], preempt_data[1]);
+ igt_assert_lte(perf_data[1], preempt_data[0]);
+ igt_assert_lte(perf_data[1], preempt_data[1]);
+ }
+ } while (timestamp_loop);
+
+ __perf_close(perf_fd);
+
+ gem_context_destroy(drm_fd, perf_context);
+ gem_context_destroy(drm_fd, preempt_context);
+}
+
+/*
+ * Verify that holding preemption is not available for normal users
+ * unless perf_stream_paranoid is off.
+ */
+static void
+test_unprivileged_single_ctx_counters_disabled_preemption(void)
+{
+ uint64_t properties[] = {
+ DRM_I915_PERF_PROP_CTX_HANDLE, UINT64_MAX, /* updated below */
+ DRM_I915_PERF_PROP_SAMPLE_OA, true,
+ DRM_I915_PERF_PROP_HOLD_PREEMPTION, true,
+
+ /* OA unit configuration */
+ DRM_I915_PERF_PROP_OA_METRICS_SET, test_metric_set_id,
+ DRM_I915_PERF_PROP_OA_FORMAT, test_oa_format,
+ DRM_I915_PERF_PROP_OA_EXPONENT, max_oa_exponent_for_period_lte(1000000),
+ };
+ struct drm_i915_perf_open_param param = {
+ .flags = I915_PERF_FLAG_FD_CLOEXEC,
+ .num_properties = sizeof(properties) / 16,
+ .properties_ptr = to_user_pointer(properties),
+ };
+ uint32_t perf_context = gem_context_create(drm_fd);
+
+ properties[1] = perf_context;
+
+ igt_fork(child, 1) {
+ write_u64_file("/proc/sys/dev/i915/perf_stream_paranoid", 1);
+
+ igt_drop_root();
+
+ do_ioctl_err(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param, EACCES);
+ }
+
+ igt_waitchildren();
+
+ igt_fork(child, 1) {
+ write_u64_file("/proc/sys/dev/i915/perf_stream_paranoid", 0);
+
+ igt_drop_root();
+
+ stream_fd = __perf_open(drm_fd, &param, false);
+ __perf_close(stream_fd);
+ }
+
+ igt_waitchildren();
+
+ gem_context_destroy(drm_fd, perf_context);
+}
+
+/*
+ * Invalid cases in which to disable preemption.
+ */
+static void
+test_invalid_disabled_preemption(void)
+{
+ uint64_t properties[] = {
+ /* Missing context */
+ DRM_I915_PERF_PROP_SAMPLE_OA, true,
+ DRM_I915_PERF_PROP_HOLD_PREEMPTION, true,
+
+ /* OA unit configuration */
+ DRM_I915_PERF_PROP_OA_METRICS_SET, test_metric_set_id,
+ DRM_I915_PERF_PROP_OA_FORMAT, test_oa_format,
+ DRM_I915_PERF_PROP_OA_EXPONENT, max_oa_exponent_for_period_lte(1000000),
+ };
+ struct drm_i915_perf_open_param param = {
+ .flags = I915_PERF_FLAG_FD_CLOEXEC,
+ .num_properties = sizeof(properties) / 16,
+ .properties_ptr = to_user_pointer(properties),
+ };
+
+ do_ioctl_err(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param, EINVAL);
+}
+
static unsigned
read_i915_module_ref(void)
{
@@ -4190,6 +4731,21 @@ igt_main
igt_subtest("whitelisted-registers-userspace-config")
test_whitelisted_registers_userspace_config();
+ igt_subtest_group {
+ igt_fixture {
+ igt_require(has_i915_perf_disable_preemption_support(drm_fd));
+ }
+
+ igt_subtest("single-ctx-counters-disabled-preemption")
+ test_single_ctx_counters_disabled_preemption();
+
+ igt_subtest("unprivileged-single-ctx-counters-disabled-preemption")
+ test_unprivileged_single_ctx_counters_disabled_preemption();
+
+ igt_subtest("invalid-disabled-preemption")
+ test_invalid_disabled_preemption();
+ }
+
igt_fixture {
/* leave sysctl options in their default state... */
write_u64_file("/proc/sys/dev/i915/oa_max_sample_rate", 100000);
--
2.22.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 21+ messages in thread* [igt-dev] [PATCH i-g-t 04/10] tests/perf: add tests to new OA reconfiguration execbuf extension
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
` (2 preceding siblings ...)
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 03/10] tests/perf: add tests for holding preemption Lionel Landwerlin
@ 2019-07-25 10:30 ` Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 05/10] lib/syncobj: drop local declarations Lionel Landwerlin
` (8 subsequent siblings)
12 siblings, 0 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:30 UTC (permalink / raw)
To: igt-dev
This is just basic validation of the input.
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
tests/perf.c | 143 +++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 143 insertions(+)
diff --git a/tests/perf.c b/tests/perf.c
index 3bbc898a..1152ab73 100644
--- a/tests/perf.c
+++ b/tests/perf.c
@@ -4051,6 +4051,19 @@ static bool has_i915_perf_disable_preemption_support(int fd)
return perf_version >= 2;
}
+static bool has_i915_exec_perf(int fd)
+{
+ struct drm_i915_getparam gp;
+ int perf_config = 0;
+
+ memset(&gp, 0, sizeof(gp));
+ gp.param = I915_PARAM_HAS_EXEC_PERF_CONFIG;
+ gp.value = &perf_config;
+ igt_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+
+ return perf_config != 0;
+}
+
static uint32_t *
fill_relocation(uint32_t *batch,
struct drm_i915_gem_relocation_entry *reloc,
@@ -4493,6 +4506,124 @@ test_invalid_disabled_preemption(void)
do_ioctl_err(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param, EINVAL);
}
+/* Test that requesting OA reconfiguration through execbuffer fails if
+ * the parameters are invalid.
+ */
+static void
+test_exec_perf_invalid(void)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_execbuffer_ext_perf perf_config;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+ execbuf.flags = I915_EXEC_EXT;
+ execbuf.cliprects_ptr = to_user_pointer(&perf_config);
+ execbuf.num_cliprects = 0;
+
+ memset(&obj, 0, sizeof(obj));
+ obj.handle = gem_create(drm_fd, 4096);
+ gem_write(drm_fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ /* Invalid perf fd */
+ memset(&perf_config, 0, sizeof(perf_config));
+ perf_config.base.name = DRM_I915_GEM_EXECBUFFER_EXT_PERF;
+ perf_config.perf_fd = -1;
+ perf_config.oa_config = test_metric_set_id;
+ igt_assert_eq(__gem_execbuf(drm_fd, &execbuf), -EINVAL);
+
+ {
+ uint64_t properties[] = {
+ /* Include OA reports in samples */
+ DRM_I915_PERF_PROP_SAMPLE_OA, true,
+
+ /* OA unit configuration */
+ DRM_I915_PERF_PROP_OA_METRICS_SET, test_metric_set_id,
+ DRM_I915_PERF_PROP_OA_FORMAT, test_oa_format,
+ DRM_I915_PERF_PROP_OA_EXPONENT, oa_exp_1_millisec,
+ };
+ struct drm_i915_perf_open_param param = {
+ .flags = I915_PERF_FLAG_FD_CLOEXEC |
+ I915_PERF_FLAG_FD_NONBLOCK,
+ .num_properties = sizeof(properties) / 16,
+ .properties_ptr = to_user_pointer(properties),
+ };
+
+ stream_fd = __perf_open(drm_fd, &param, false);
+ }
+
+ /* Invalid perf fd */
+ memset(&perf_config, 0, sizeof(perf_config));
+ perf_config.base.name = DRM_I915_GEM_EXECBUFFER_EXT_PERF;
+ perf_config.perf_fd = stream_fd + 1;
+ perf_config.oa_config = test_metric_set_id;
+ igt_assert_eq(__gem_execbuf(drm_fd, &execbuf), -EINVAL);
+
+ /* Invalid OA config */
+ memset(&perf_config, 0, sizeof(perf_config));
+ perf_config.base.name = DRM_I915_GEM_EXECBUFFER_EXT_PERF;
+ perf_config.perf_fd = stream_fd;
+ perf_config.oa_config = UINT64_MAX;
+ igt_assert_eq(__gem_execbuf(drm_fd, &execbuf), -EINVAL);
+
+ gem_close(drm_fd, obj.handle);
+ __perf_close(stream_fd);
+}
+
+static void
+test_exec_perf(void)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_execbuffer_ext_perf perf_config;
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+ execbuf.flags = I915_EXEC_EXT;
+ execbuf.cliprects_ptr = to_user_pointer(&perf_config);
+ execbuf.num_cliprects = 0;
+
+ memset(&obj, 0, sizeof(obj));
+ obj.handle = gem_create(drm_fd, 4096);
+ gem_write(drm_fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ {
+ uint64_t properties[] = {
+ /* Include OA reports in samples */
+ DRM_I915_PERF_PROP_SAMPLE_OA, true,
+
+ /* OA unit configuration */
+ DRM_I915_PERF_PROP_OA_METRICS_SET, test_metric_set_id,
+ DRM_I915_PERF_PROP_OA_FORMAT, test_oa_format,
+ DRM_I915_PERF_PROP_OA_EXPONENT, oa_exp_1_millisec,
+ };
+ struct drm_i915_perf_open_param param = {
+ .flags = I915_PERF_FLAG_FD_CLOEXEC |
+ I915_PERF_FLAG_FD_NONBLOCK,
+ .num_properties = sizeof(properties) / 16,
+ .properties_ptr = to_user_pointer(properties),
+ };
+
+ stream_fd = __perf_open(drm_fd, &param, false);
+ }
+
+ memset(&perf_config, 0, sizeof(perf_config));
+ perf_config.base.name = DRM_I915_GEM_EXECBUFFER_EXT_PERF;
+ perf_config.perf_fd = stream_fd;
+ perf_config.oa_config = test_metric_set_id;
+ gem_execbuf(drm_fd, &execbuf);
+
+ gem_wait(drm_fd, obj.handle, NULL);
+
+ gem_close(drm_fd, obj.handle);
+ __perf_close(stream_fd);
+}
+
static unsigned
read_i915_module_ref(void)
{
@@ -4746,6 +4877,18 @@ igt_main
test_invalid_disabled_preemption();
}
+ igt_subtest_group {
+ igt_fixture {
+ igt_require(has_i915_exec_perf(drm_fd));
+ }
+
+ igt_subtest("exec-perf-invalid")
+ test_exec_perf_invalid();
+
+ igt_subtest("exec-perf")
+ test_exec_perf();
+ }
+
igt_fixture {
/* leave sysctl options in their default state... */
write_u64_file("/proc/sys/dev/i915/oa_max_sample_rate", 100000);
--
2.22.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 21+ messages in thread* [igt-dev] [PATCH i-g-t 05/10] lib/syncobj: drop local declarations
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
` (3 preceding siblings ...)
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 04/10] tests/perf: add tests to new OA reconfiguration execbuf extension Lionel Landwerlin
@ 2019-07-25 10:30 ` Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 06/10] igt: add timeline test cases Lionel Landwerlin
` (7 subsequent siblings)
12 siblings, 0 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:30 UTC (permalink / raw)
To: igt-dev
We have copies of the DRM uAPI headers in the repo, so drop the local
declaration of syncobj defines/types.
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
lib/igt_syncobj.c | 16 ++++++------
lib/igt_syncobj.h | 26 +------------------
tests/syncobj_basic.c | 2 +-
tests/syncobj_wait.c | 58 +++++++++++++++++++++----------------------
4 files changed, 39 insertions(+), 63 deletions(-)
diff --git a/lib/igt_syncobj.c b/lib/igt_syncobj.c
index 0fddb97a..e5569ffc 100644
--- a/lib/igt_syncobj.c
+++ b/lib/igt_syncobj.c
@@ -170,10 +170,10 @@ syncobj_import_sync_file(int fd, uint32_t handle, int sync_file)
}
int
-__syncobj_wait(int fd, struct local_syncobj_wait *args)
+__syncobj_wait(int fd, struct drm_syncobj_wait *args)
{
int err = 0;
- if (drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_WAIT, args))
+ if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, args))
err = -errno;
return err;
}
@@ -182,7 +182,7 @@ int
syncobj_wait_err(int fd, uint32_t *handles, uint32_t count,
uint64_t abs_timeout_nsec, uint32_t flags)
{
- struct local_syncobj_wait wait;
+ struct drm_syncobj_wait wait;
wait.handles = to_user_pointer(handles);
wait.timeout_nsec = abs_timeout_nsec;
@@ -212,7 +212,7 @@ syncobj_wait(int fd, uint32_t *handles, uint32_t count,
uint64_t abs_timeout_nsec, uint32_t flags,
uint32_t *first_signaled)
{
- struct local_syncobj_wait wait;
+ struct drm_syncobj_wait wait;
int ret;
wait.handles = to_user_pointer(handles);
@@ -236,12 +236,12 @@ syncobj_wait(int fd, uint32_t *handles, uint32_t count,
static int
__syncobj_reset(int fd, uint32_t *handles, uint32_t count)
{
- struct local_syncobj_array array = { 0 };
+ struct drm_syncobj_array array = { 0 };
int err = 0;
array.handles = to_user_pointer(handles);
array.count_handles = count;
- if (drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_RESET, &array))
+ if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &array))
err = -errno;
return err;
}
@@ -263,12 +263,12 @@ syncobj_reset(int fd, uint32_t *handles, uint32_t count)
static int
__syncobj_signal(int fd, uint32_t *handles, uint32_t count)
{
- struct local_syncobj_array array = { 0 };
+ struct drm_syncobj_array array = { 0 };
int err = 0;
array.handles = to_user_pointer(handles);
array.count_handles = count;
- if (drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_SIGNAL, &array))
+ if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array))
err = -errno;
return err;
}
diff --git a/lib/igt_syncobj.h b/lib/igt_syncobj.h
index 44d1378d..51ad2364 100644
--- a/lib/igt_syncobj.h
+++ b/lib/igt_syncobj.h
@@ -28,30 +28,6 @@
#include <stdbool.h>
#include <drm.h>
-#define LOCAL_SYNCOBJ_CREATE_SIGNALED (1 << 0)
-
-#define LOCAL_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
-#define LOCAL_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
-struct local_syncobj_wait {
- __u64 handles;
- /* absolute timeout */
- __s64 timeout_nsec;
- __u32 count_handles;
- __u32 flags;
- __u32 first_signaled; /* only valid when not waiting all */
- __u32 pad;
-};
-
-struct local_syncobj_array {
- __u64 handles;
- __u32 count_handles;
- __u32 pad;
-};
-
-#define LOCAL_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct local_syncobj_wait)
-#define LOCAL_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct local_syncobj_array)
-#define LOCAL_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct local_syncobj_array)
-
uint32_t syncobj_create(int fd, uint32_t flags);
void syncobj_destroy(int fd, uint32_t handle);
int __syncobj_handle_to_fd(int fd, struct drm_syncobj_handle *args);
@@ -59,7 +35,7 @@ int __syncobj_fd_to_handle(int fd, struct drm_syncobj_handle *args);
int syncobj_handle_to_fd(int fd, uint32_t handle, uint32_t flags);
uint32_t syncobj_fd_to_handle(int fd, int syncobj_fd, uint32_t flags);
void syncobj_import_sync_file(int fd, uint32_t handle, int sync_file);
-int __syncobj_wait(int fd, struct local_syncobj_wait *args);
+int __syncobj_wait(int fd, struct drm_syncobj_wait *args);
int syncobj_wait_err(int fd, uint32_t *handles, uint32_t count,
uint64_t abs_timeout_nsec, uint32_t flags);
bool syncobj_wait(int fd, uint32_t *handles, uint32_t count,
diff --git a/tests/syncobj_basic.c b/tests/syncobj_basic.c
index 44769d3b..1dce45c9 100644
--- a/tests/syncobj_basic.c
+++ b/tests/syncobj_basic.c
@@ -149,7 +149,7 @@ test_bad_create_flags(int fd)
static void
test_create_signaled(int fd)
{
- uint32_t syncobj = syncobj_create(fd, LOCAL_SYNCOBJ_CREATE_SIGNALED);
+ uint32_t syncobj = syncobj_create(fd, DRM_SYNCOBJ_CREATE_SIGNALED);
igt_assert_eq(syncobj_wait_err(fd, &syncobj, 1, 0, 0), 0);
diff --git a/tests/syncobj_wait.c b/tests/syncobj_wait.c
index 04d79de7..669d0adf 100644
--- a/tests/syncobj_wait.c
+++ b/tests/syncobj_wait.c
@@ -140,7 +140,7 @@ syncobj_trigger_delayed(int fd, uint32_t syncobj, uint64_t nsec)
static void
test_wait_bad_flags(int fd)
{
- struct local_syncobj_wait wait = { 0 };
+ struct drm_syncobj_wait wait = { 0 };
wait.flags = 0xdeadbeef;
igt_assert_eq(__syncobj_wait(fd, &wait), -EINVAL);
}
@@ -148,14 +148,14 @@ test_wait_bad_flags(int fd)
static void
test_wait_zero_handles(int fd)
{
- struct local_syncobj_wait wait = { 0 };
+ struct drm_syncobj_wait wait = { 0 };
igt_assert_eq(__syncobj_wait(fd, &wait), -EINVAL);
}
static void
test_wait_illegal_handle(int fd)
{
- struct local_syncobj_wait wait = { 0 };
+ struct drm_syncobj_wait wait = { 0 };
uint32_t handle = 0;
wait.count_handles = 1;
@@ -166,43 +166,43 @@ test_wait_illegal_handle(int fd)
static void
test_reset_zero_handles(int fd)
{
- struct local_syncobj_array array = { 0 };
+ struct drm_syncobj_array array = { 0 };
int ret;
- ret = drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_RESET, &array);
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &array);
igt_assert(ret == -1 && errno == EINVAL);
}
static void
test_reset_illegal_handle(int fd)
{
- struct local_syncobj_array array = { 0 };
+ struct drm_syncobj_array array = { 0 };
uint32_t handle = 0;
int ret;
array.count_handles = 1;
array.handles = to_user_pointer(&handle);
- ret = drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_RESET, &array);
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &array);
igt_assert(ret == -1 && errno == ENOENT);
}
static void
test_reset_one_illegal_handle(int fd)
{
- struct local_syncobj_array array = { 0 };
+ struct drm_syncobj_array array = { 0 };
uint32_t syncobjs[3];
int ret;
- syncobjs[0] = syncobj_create(fd, LOCAL_SYNCOBJ_CREATE_SIGNALED);
+ syncobjs[0] = syncobj_create(fd, DRM_SYNCOBJ_CREATE_SIGNALED);
syncobjs[1] = 0;
- syncobjs[2] = syncobj_create(fd, LOCAL_SYNCOBJ_CREATE_SIGNALED);
+ syncobjs[2] = syncobj_create(fd, DRM_SYNCOBJ_CREATE_SIGNALED);
igt_assert_eq(syncobj_wait_err(fd, &syncobjs[0], 1, 0, 0), 0);
igt_assert_eq(syncobj_wait_err(fd, &syncobjs[2], 1, 0, 0), 0);
array.count_handles = 3;
array.handles = to_user_pointer(syncobjs);
- ret = drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_RESET, &array);
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &array);
igt_assert(ret == -1 && errno == ENOENT);
/* Assert that we didn't actually reset anything */
@@ -216,44 +216,44 @@ test_reset_one_illegal_handle(int fd)
static void
test_reset_bad_pad(int fd)
{
- struct local_syncobj_array array = { 0 };
+ struct drm_syncobj_array array = { 0 };
uint32_t handle = 0;
int ret;
array.pad = 0xdeadbeef;
array.count_handles = 1;
array.handles = to_user_pointer(&handle);
- ret = drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_RESET, &array);
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &array);
igt_assert(ret == -1 && errno == EINVAL);
}
static void
test_signal_zero_handles(int fd)
{
- struct local_syncobj_array array = { 0 };
+ struct drm_syncobj_array array = { 0 };
int ret;
- ret = drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_SIGNAL, &array);
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);
igt_assert(ret == -1 && errno == EINVAL);
}
static void
test_signal_illegal_handle(int fd)
{
- struct local_syncobj_array array = { 0 };
+ struct drm_syncobj_array array = { 0 };
uint32_t handle = 0;
int ret;
array.count_handles = 1;
array.handles = to_user_pointer(&handle);
- ret = drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_SIGNAL, &array);
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);
igt_assert(ret == -1 && errno == ENOENT);
}
static void
test_signal_one_illegal_handle(int fd)
{
- struct local_syncobj_array array = { 0 };
+ struct drm_syncobj_array array = { 0 };
uint32_t syncobjs[3];
int ret;
@@ -266,7 +266,7 @@ test_signal_one_illegal_handle(int fd)
array.count_handles = 3;
array.handles = to_user_pointer(syncobjs);
- ret = drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_SIGNAL, &array);
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);
igt_assert(ret == -1 && errno == ENOENT);
/* Assert that we didn't actually reset anything */
@@ -280,14 +280,14 @@ test_signal_one_illegal_handle(int fd)
static void
test_signal_bad_pad(int fd)
{
- struct local_syncobj_array array = { 0 };
+ struct drm_syncobj_array array = { 0 };
uint32_t handle = 0;
int ret;
array.pad = 0xdeadbeef;
array.count_handles = 1;
array.handles = to_user_pointer(&handle);
- ret = drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_SIGNAL, &array);
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);
igt_assert(ret == -1 && errno == EINVAL);
}
@@ -304,10 +304,10 @@ flags_for_test_flags(uint32_t test_flags)
uint32_t flags = 0;
if (test_flags & WAIT_FOR_SUBMIT)
- flags |= LOCAL_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
+ flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
if (test_flags & WAIT_ALL)
- flags |= LOCAL_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
+ flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
return flags;
}
@@ -432,7 +432,7 @@ static void
test_reset_during_wait_for_submit(int fd)
{
uint32_t syncobj = syncobj_create(fd, 0);
- uint32_t flags = LOCAL_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
+ uint32_t flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
struct fd_handle_pair pair;
timer_t timer;
@@ -454,7 +454,7 @@ static void
test_signal(int fd)
{
uint32_t syncobj = syncobj_create(fd, 0);
- uint32_t flags = LOCAL_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
+ uint32_t flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
igt_assert_eq(syncobj_wait_err(fd, &syncobj, 1, 0, 0), -EINVAL);
igt_assert_eq(syncobj_wait_err(fd, &syncobj, 1, 0, flags), -ETIME);
@@ -511,7 +511,7 @@ test_multi_wait(int fd, uint32_t test_flags, int expect)
struct wait_thread_data {
int fd;
- struct local_syncobj_wait wait;
+ struct drm_syncobj_wait wait;
};
static void *
@@ -713,7 +713,7 @@ test_wait_complex(int fd, uint32_t test_flags)
static void
test_wait_interrupted(int fd, uint32_t test_flags)
{
- struct local_syncobj_wait wait = { 0 };
+ struct drm_syncobj_wait wait = { 0 };
uint32_t syncobj = syncobj_create(fd, 0);
int timeline;
@@ -740,7 +740,7 @@ test_wait_interrupted(int fd, uint32_t test_flags)
static bool
has_syncobj_wait(int fd)
{
- struct local_syncobj_wait wait = { 0 };
+ struct drm_syncobj_wait wait = { 0 };
uint32_t handle = 0;
uint64_t value;
int ret;
@@ -753,7 +753,7 @@ has_syncobj_wait(int fd)
/* Try waiting for zero sync objects should fail with EINVAL */
wait.count_handles = 1;
wait.handles = to_user_pointer(&handle);
- ret = drmIoctl(fd, LOCAL_IOCTL_SYNCOBJ_WAIT, &wait);
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
return ret == -1 && errno == ENOENT;
}
--
2.22.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 21+ messages in thread* [igt-dev] [PATCH i-g-t 06/10] igt: add timeline test cases
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
` (4 preceding siblings ...)
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 05/10] lib/syncobj: drop local declarations Lionel Landwerlin
@ 2019-07-25 10:30 ` Lionel Landwerlin
2019-07-26 2:56 ` zhoucm1
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 07/10] tests/syncobj_timeline: add more timeline tests Lionel Landwerlin
` (6 subsequent siblings)
12 siblings, 1 reply; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:30 UTC (permalink / raw)
To: igt-dev; +Cc: Chunming Zhou
From: Chunming Zhou <david1.zhou@amd.com>
v2: adapt to new transfer ioctl
v3: Drop useless uint64_t casts (Lionel)
Fix timeline query prototypes (Lionel)
Test multi wait with timeline & binary syncobjs (Lionel)
v4: Switch from drmIoctl to igt_ioctl in tests/*.c (Chris)
Clear out errno in helper functions (Chris)
v5: Fix lib comments on transfer helpers (Lionel)
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
lib/igt_syncobj.c | 237 +++++++++
lib/igt_syncobj.h | 19 +
tests/meson.build | 1 +
tests/syncobj_timeline.c | 1049 ++++++++++++++++++++++++++++++++++++++
4 files changed, 1306 insertions(+)
create mode 100644 tests/syncobj_timeline.c
diff --git a/lib/igt_syncobj.c b/lib/igt_syncobj.c
index e5569ffc..318078f6 100644
--- a/lib/igt_syncobj.c
+++ b/lib/igt_syncobj.c
@@ -286,3 +286,240 @@ syncobj_signal(int fd, uint32_t *handles, uint32_t count)
{
igt_assert_eq(__syncobj_signal(fd, handles, count), 0);
}
+
+static int
+__syncobj_timeline_signal(int fd, uint32_t *handles, uint64_t *points, uint32_t count)
+{
+ struct drm_syncobj_timeline_array array = { 0 };
+ int err = 0;
+
+ array.handles = to_user_pointer(handles);
+ array.points = to_user_pointer(points);
+ array.count_handles = count;
+ if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &array)) {
+ err = -errno;
+ igt_assume(err);
+ }
+ errno = 0;
+ return err;
+}
+
+/**
+ * syncobj_signal:
+ * @fd: The DRM file descriptor.
+ * @handles: Array of syncobj handles to signal
+ * @points: List of point of handles to signal.
+ * @count: Count of syncobj handles.
+ *
+ * Signal a set of syncobjs.
+ */
+void
+syncobj_timeline_signal(int fd, uint32_t *handles, uint64_t *points, uint32_t count)
+{
+ igt_assert_eq(__syncobj_timeline_signal(fd, handles, points, count), 0);
+}
+int
+__syncobj_timeline_wait_ioctl(int fd, struct drm_syncobj_timeline_wait *args)
+{
+ int err = 0;
+ if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, args)) {
+ err = -errno;
+ igt_assume(err);
+ }
+ errno = 0;
+ return err;
+}
+static int
+__syncobj_timeline_wait(int fd, uint32_t *handles, uint64_t *points,
+ unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled)
+{
+ struct drm_syncobj_timeline_wait args;
+ int ret;
+
+ args.handles = to_user_pointer(handles);
+ args.points = to_user_pointer(points);
+ args.timeout_nsec = timeout_nsec;
+ args.count_handles = num_handles;
+ args.flags = flags;
+ args.first_signaled = 0;
+ args.pad = 0;
+
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);
+ if (ret < 0) {
+ ret = -errno;
+ igt_assume(ret);
+ }
+ errno = 0;
+
+ if (first_signaled)
+ *first_signaled = args.first_signaled;
+
+ return ret;
+}
+int
+syncobj_timeline_wait_err(int fd, uint32_t *handles, uint64_t *points,
+ unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags)
+{
+ return __syncobj_timeline_wait(fd, handles, points, num_handles,
+ timeout_nsec, flags, NULL);
+}
+
+/**
+ * syncobj_timeline_wait:
+ * @fd: The DRM file descriptor
+ * @handles: List of syncobj handles to wait for.
+ * @points: List of point of handles to wait for.
+ * @num_handles: Count of handles
+ * @timeout_nsec: Absolute wait timeout in nanoseconds.
+ * @flags: Wait ioctl flags.
+ * @first_signaled: Returned handle for first signaled syncobj.
+ *
+ * Waits in the kernel for any/all the requested syncobjs timeline point
+ * using the timeout and flags.
+ * Returns: bool value - false = timedout, true = signaled
+ */
+bool
+syncobj_timeline_wait(int fd, uint32_t *handles, uint64_t *points,
+ unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled)
+{
+ int ret;
+
+ ret = __syncobj_timeline_wait(fd, handles, points, num_handles,
+ timeout_nsec, flags, first_signaled);
+ if (ret == -ETIME)
+ return false;
+ igt_assert_eq(ret, 0);
+
+ return true;
+
+}
+
+static int
+__syncobj_timeline_query(int fd, uint32_t *handles, uint64_t *points,
+ uint32_t handle_count)
+{
+ struct drm_syncobj_timeline_array args;
+ int ret;
+
+ args.handles = to_user_pointer(handles);
+ args.points = to_user_pointer(points);
+ args.count_handles = handle_count;
+ args.pad = 0;
+
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
+ if (ret) {
+ ret = -errno;
+ igt_assume(ret);
+ }
+
+ errno = 0;
+ return ret;
+}
+
+/**
+ * syncobj_timeline_query:
+ * @fd: The DRM file descriptor.
+ * @handles: Array of syncobj handles.
+ * @points: Array of syncobj points queried.
+ * @count: Count of syncobj handles.
+ *
+ * Queries a set of syncobjs.
+ */
+void
+syncobj_timeline_query(int fd, uint32_t *handles, uint64_t *points,
+ uint32_t count)
+{
+ igt_assert_eq(__syncobj_timeline_query(fd, handles, points, count), 0);
+}
+
+static int
+__syncobj_binary_to_timeline(int fd, uint32_t timeline_handle,
+ uint64_t point, uint32_t binary_handle)
+{
+ struct drm_syncobj_transfer args;
+ int ret;
+
+ args.src_handle = binary_handle;
+ args.dst_handle = timeline_handle;
+ args.src_point = 0;
+ args.dst_point = point;
+ args.flags = 0;
+ args.pad = 0;
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &args);
+ if (ret) {
+ ret = -errno;
+ igt_assert(ret);
+ }
+
+ errno = 0;
+ return ret;
+}
+
+/**
+ * syncobj_binary_to_timeline:
+ * @fd: The DRM file descriptor.
+ * @timeline_handle: A syncobj timeline handle
+ * @point: A syncobj timeline point in the timeline handle
+ * @binary_handle: A syncobj binary handle
+ *
+ * Transfers a DMA fence from a binary syncobj into a timeline syncobj
+ * at a given point on the timeline.
+ */
+void
+syncobj_binary_to_timeline(int fd, uint32_t timeline_handle,
+ uint64_t point, uint32_t binary_handle)
+{
+ igt_assert_eq(__syncobj_binary_to_timeline(fd, timeline_handle, point,
+ binary_handle), 0);
+}
+
+static int
+__syncobj_timeline_to_binary(int fd, uint32_t binary_handle,
+ uint32_t timeline_handle,
+ uint64_t point,
+ uint32_t flags)
+{
+ struct drm_syncobj_transfer args;
+ int ret;
+
+ args.dst_handle = binary_handle;
+ args.src_handle = timeline_handle;
+ args.dst_point = 0;
+ args.src_point = point;
+ args.flags = flags;
+ args.pad = 0;
+ ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &args);
+ if (ret) {
+ ret = -errno;
+ igt_assert(ret);
+ }
+
+ errno = 0;
+ return ret;
+}
+
+/**
+ * syncobj_binary_to_timeline:
+ * @fd: The DRM file descriptor.
+ * @binary_handle: A syncobj binary handle
+ * @timeline_handle: A syncobj timeline handle
+ * @point: A syncobj timeline point in the timeline handle
+ *
+ * Transfers DMA fence from a given point from timeline syncobj into a
+ * binary syncobj.
+ */
+void
+syncobj_timeline_to_binary(int fd, uint32_t binary_handle,
+ uint32_t timeline_handle,
+ uint64_t point,
+ uint32_t flags)
+{
+ igt_assert_eq(__syncobj_timeline_to_binary(fd, binary_handle,
+ timeline_handle, point,
+ flags), 0);
+}
diff --git a/lib/igt_syncobj.h b/lib/igt_syncobj.h
index 51ad2364..20f1f18f 100644
--- a/lib/igt_syncobj.h
+++ b/lib/igt_syncobj.h
@@ -41,7 +41,26 @@ int syncobj_wait_err(int fd, uint32_t *handles, uint32_t count,
bool syncobj_wait(int fd, uint32_t *handles, uint32_t count,
uint64_t abs_timeout_nsec, uint32_t flags,
uint32_t *first_signaled);
+int __syncobj_timeline_wait_ioctl(int fd,
+ struct drm_syncobj_timeline_wait *args);
+bool syncobj_timeline_wait(int fd, uint32_t *handles, uint64_t *points,
+ unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags,
+ uint32_t *first_signaled);
+int syncobj_timeline_wait_err(int fd, uint32_t *handles, uint64_t *points,
+ unsigned num_handles,
+ int64_t timeout_nsec, unsigned flags);
void syncobj_reset(int fd, uint32_t *handles, uint32_t count);
void syncobj_signal(int fd, uint32_t *handles, uint32_t count);
+void syncobj_timeline_query(int fd, uint32_t *handles, uint64_t *points,
+ uint32_t count);
+void syncobj_binary_to_timeline(int fd, uint32_t timeline_handle,
+ uint64_t point, uint32_t binary_handle);
+void syncobj_timeline_to_binary(int fd, uint32_t binary_handle,
+ uint32_t timeline_handle,
+ uint64_t point,
+ uint32_t flags);
+void syncobj_timeline_signal(int fd, uint32_t *handles, uint64_t *points,
+ uint32_t count);
#endif /* IGT_SYNCOBJ_H */
diff --git a/tests/meson.build b/tests/meson.build
index 34a74025..905e2435 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -78,6 +78,7 @@ test_progs = [
'prime_vgem',
'syncobj_basic',
'syncobj_wait',
+ 'syncobj_timeline',
'template',
'tools_test',
'v3d_get_bo_offset',
diff --git a/tests/syncobj_timeline.c b/tests/syncobj_timeline.c
new file mode 100644
index 00000000..7fd602de
--- /dev/null
+++ b/tests/syncobj_timeline.c
@@ -0,0 +1,1049 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "igt.h"
+#include "sw_sync.h"
+#include "igt_syncobj.h"
+#include <unistd.h>
+#include <time.h>
+#include <sys/ioctl.h>
+#include <pthread.h>
+#include <signal.h>
+#include "drm.h"
+
+IGT_TEST_DESCRIPTION("Tests for the drm timeline sync object API");
+
+/* One tenth of a second */
+#define SHORT_TIME_NSEC 100000000ull
+
+#define NSECS_PER_SEC 1000000000ull
+
+static uint64_t
+gettime_ns(void)
+{
+ struct timespec current;
+ clock_gettime(CLOCK_MONOTONIC, &current);
+ return (uint64_t)current.tv_sec * NSECS_PER_SEC + current.tv_nsec;
+}
+
+static void
+sleep_nsec(uint64_t time_nsec)
+{
+ struct timespec t;
+ t.tv_sec = time_nsec / NSECS_PER_SEC;
+ t.tv_nsec = time_nsec % NSECS_PER_SEC;
+ igt_assert_eq(nanosleep(&t, NULL), 0);
+}
+
+static uint64_t
+short_timeout(void)
+{
+ return gettime_ns() + SHORT_TIME_NSEC;
+}
+
+static int
+syncobj_attach_sw_sync(int fd, uint32_t handle, uint64_t point)
+{
+ int timeline, fence;
+
+ timeline = sw_sync_timeline_create();
+ fence = sw_sync_timeline_create_fence(timeline, 1);
+
+ if (point == 0) {
+ syncobj_import_sync_file(fd, handle, fence);
+ } else {
+ uint32_t syncobj = syncobj_create(fd, 0);
+
+ syncobj_import_sync_file(fd, syncobj, fence);
+ syncobj_binary_to_timeline(fd, handle, point, syncobj);
+ syncobj_destroy(fd, syncobj);
+ }
+
+ close(fence);
+
+ return timeline;
+}
+
+static void
+syncobj_trigger(int fd, uint32_t handle, uint64_t point)
+{
+ int timeline = syncobj_attach_sw_sync(fd, handle, point);
+ sw_sync_timeline_inc(timeline, 1);
+ close(timeline);
+}
+
+static timer_t
+set_timer(void (*cb)(union sigval), void *ptr, int i, uint64_t nsec)
+{
+ timer_t timer;
+ struct sigevent sev;
+ struct itimerspec its;
+
+ memset(&sev, 0, sizeof(sev));
+ sev.sigev_notify = SIGEV_THREAD;
+ if (ptr)
+ sev.sigev_value.sival_ptr = ptr;
+ else
+ sev.sigev_value.sival_int = i;
+ sev.sigev_notify_function = cb;
+ igt_assert(timer_create(CLOCK_MONOTONIC, &sev, &timer) == 0);
+
+ memset(&its, 0, sizeof(its));
+ its.it_value.tv_sec = nsec / NSEC_PER_SEC;
+ its.it_value.tv_nsec = nsec % NSEC_PER_SEC;
+ igt_assert(timer_settime(timer, 0, &its, NULL) == 0);
+
+ return timer;
+}
+
+struct fd_handle_pair {
+ int fd;
+ uint32_t handle;
+ uint64_t point;
+};
+
+static void
+timeline_inc_func(union sigval sigval)
+{
+ sw_sync_timeline_inc(sigval.sival_int, 1);
+}
+
+static void
+syncobj_trigger_free_pair_func(union sigval sigval)
+{
+ struct fd_handle_pair *pair = sigval.sival_ptr;
+ syncobj_trigger(pair->fd, pair->handle, pair->point);
+ free(pair);
+}
+
+static timer_t
+syncobj_trigger_delayed(int fd, uint32_t syncobj, uint64_t point, uint64_t nsec)
+{
+ struct fd_handle_pair *pair = malloc(sizeof(*pair));
+
+ pair->fd = fd;
+ pair->handle = syncobj;
+ pair->point = point;
+
+ return set_timer(syncobj_trigger_free_pair_func, pair, 0, nsec);
+}
+
+static void
+test_wait_bad_flags(int fd)
+{
+ struct drm_syncobj_timeline_wait wait = {};
+ wait.flags = 0xdeadbeef;
+ igt_assert_eq(__syncobj_timeline_wait_ioctl(fd, &wait), -EINVAL);
+}
+
+static void
+test_wait_zero_handles(int fd)
+{
+ struct drm_syncobj_timeline_wait wait = {};
+ igt_assert_eq(__syncobj_timeline_wait_ioctl(fd, &wait), -EINVAL);
+}
+
+static void
+test_wait_illegal_handle(int fd)
+{
+ struct drm_syncobj_timeline_wait wait = {};
+ uint32_t handle = 0;
+
+ wait.count_handles = 1;
+ wait.handles = to_user_pointer(&handle);
+ igt_assert_eq(__syncobj_timeline_wait_ioctl(fd, &wait), -ENOENT);
+}
+
+static void
+test_query_zero_handles(int fd)
+{
+ struct drm_syncobj_timeline_array args = {};
+ int ret;
+
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
+ igt_assert(ret == -1 && errno == EINVAL);
+}
+
+static void
+test_query_illegal_handle(int fd)
+{
+ struct drm_syncobj_timeline_array args = {};
+ uint32_t handle = 0;
+ int ret;
+
+ args.count_handles = 1;
+ args.handles = to_user_pointer(&handle);
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
+ igt_assert(ret == -1 && errno == ENOENT);
+}
+
+static void
+test_query_one_illegal_handle(int fd)
+{
+ struct drm_syncobj_timeline_array array = {};
+ uint32_t syncobjs[3];
+ uint64_t initial_point = 1;
+ int ret;
+
+ syncobjs[0] = syncobj_create(fd, 0);
+ syncobjs[1] = 0;
+ syncobjs[2] = syncobj_create(fd, 0);
+
+ syncobj_timeline_signal(fd, &syncobjs[0], &initial_point, 1);
+ syncobj_timeline_signal(fd, &syncobjs[2], &initial_point, 1);
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobjs[0],
+ &initial_point, 1, 0, 0), 0);
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobjs[2],
+ &initial_point, 1, 0, 0), 0);
+
+ array.count_handles = 3;
+ array.handles = to_user_pointer(syncobjs);
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &array);
+ igt_assert(ret == -1 && errno == ENOENT);
+
+ syncobj_destroy(fd, syncobjs[0]);
+ syncobj_destroy(fd, syncobjs[2]);
+}
+
+static void
+test_query_bad_pad(int fd)
+{
+ struct drm_syncobj_timeline_array array = {};
+ uint32_t handle = 0;
+ int ret;
+
+ array.pad = 0xdeadbeef;
+ array.count_handles = 1;
+ array.handles = to_user_pointer(&handle);
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &array);
+ igt_assert(ret == -1 && errno == EINVAL);
+}
+
+static void
+test_signal_zero_handles(int fd)
+{
+ struct drm_syncobj_timeline_array args = {};
+ int ret;
+
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
+ igt_assert(ret == -1 && errno == EINVAL);
+}
+
+static void
+test_signal_illegal_handle(int fd)
+{
+ struct drm_syncobj_timeline_array args = {};
+ uint32_t handle = 0;
+ int ret;
+
+ args.count_handles = 1;
+ args.handles = to_user_pointer(&handle);
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
+ igt_assert(ret == -1 && errno == ENOENT);
+}
+
+static void
+test_signal_illegal_point(int fd)
+{
+ struct drm_syncobj_timeline_array args = {};
+ uint32_t handle = 1;
+ uint64_t point = 0;
+ int ret;
+
+ args.count_handles = 1;
+ args.handles = to_user_pointer(&handle);
+ args.points = to_user_pointer(&point);
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
+ igt_assert(ret == -1 && errno == ENOENT);
+}
+static void
+test_signal_one_illegal_handle(int fd)
+{
+ struct drm_syncobj_timeline_array array = {};
+ uint32_t syncobjs[3];
+ uint64_t initial_point = 1;
+ int ret;
+
+ syncobjs[0] = syncobj_create(fd, 0);
+ syncobjs[1] = 0;
+ syncobjs[2] = syncobj_create(fd, 0);
+
+ syncobj_timeline_signal(fd, &syncobjs[0], &initial_point, 1);
+ syncobj_timeline_signal(fd, &syncobjs[2], &initial_point, 1);
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobjs[0],
+ &initial_point, 1, 0, 0), 0);
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobjs[2],
+ &initial_point, 1, 0, 0), 0);
+
+ array.count_handles = 3;
+ array.handles = to_user_pointer(syncobjs);
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &array);
+ igt_assert(ret == -1 && errno == ENOENT);
+
+ syncobj_destroy(fd, syncobjs[0]);
+ syncobj_destroy(fd, syncobjs[2]);
+}
+
+static void
+test_signal_bad_pad(int fd)
+{
+ struct drm_syncobj_timeline_array array = {};
+ uint32_t handle = 0;
+ int ret;
+
+ array.pad = 0xdeadbeef;
+ array.count_handles = 1;
+ array.handles = to_user_pointer(&handle);
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &array);
+ igt_assert(ret == -1 && errno == EINVAL);
+}
+
+static void
+test_signal_array(int fd)
+{
+ uint32_t syncobjs[4];
+ uint64_t points[4] = {1, 1, 1, 0};
+
+ syncobjs[0] = syncobj_create(fd, 0);
+ syncobjs[1] = syncobj_create(fd, 0);
+ syncobjs[2] = syncobj_create(fd, 0);
+ syncobjs[3] = syncobj_create(fd, 0);
+
+ syncobj_timeline_signal(fd, syncobjs, points, 4);
+ igt_assert_eq(syncobj_timeline_wait_err(fd, syncobjs,
+ points, 3, 0, 0), 0);
+ igt_assert_eq(syncobj_wait_err(fd, &syncobjs[3], 1, 0, 0), 0);
+
+ syncobj_destroy(fd, syncobjs[0]);
+ syncobj_destroy(fd, syncobjs[1]);
+ syncobj_destroy(fd, syncobjs[2]);
+ syncobj_destroy(fd, syncobjs[3]);
+}
+
+static void
+test_transfer_illegal_handle(int fd)
+{
+ struct drm_syncobj_transfer args = {};
+ uint32_t handle = 0;
+ int ret;
+
+ args.src_handle = to_user_pointer(&handle);
+ args.dst_handle = to_user_pointer(&handle);
+ args.src_point = 1;
+ args.dst_point = 0;
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &args);
+ igt_assert(ret == -1 && errno == ENOENT);
+}
+
+static void
+test_transfer_bad_pad(int fd)
+{
+ struct drm_syncobj_transfer arg = {};
+ uint32_t handle = 0;
+ int ret;
+
+ arg.pad = 0xdeadbeef;
+ arg.src_handle = to_user_pointer(&handle);
+ arg.dst_handle = to_user_pointer(&handle);
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &arg);
+ igt_assert(ret == -1 && errno == EINVAL);
+}
+
+#define WAIT_FOR_SUBMIT (1 << 0)
+#define WAIT_ALL (1 << 1)
+#define WAIT_AVAILABLE (1 << 2)
+#define WAIT_UNSUBMITTED (1 << 3)
+#define WAIT_SUBMITTED (1 << 4)
+#define WAIT_SIGNALED (1 << 5)
+#define WAIT_FLAGS_MAX (1 << 6) - 1
+
+static uint32_t
+flags_for_test_flags(uint32_t test_flags)
+{
+ uint32_t flags = 0;
+
+ if (test_flags & WAIT_FOR_SUBMIT)
+ flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
+
+ if (test_flags & WAIT_AVAILABLE)
+ flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;
+
+ if (test_flags & WAIT_ALL)
+ flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
+
+ return flags;
+}
+
+static void
+test_single_wait(int fd, uint32_t test_flags, int expect)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ uint32_t flags = flags_for_test_flags(test_flags);
+ uint64_t point = 1;
+ int timeline = -1;
+
+ if (test_flags & (WAIT_SUBMITTED | WAIT_SIGNALED))
+ timeline = syncobj_attach_sw_sync(fd, syncobj, point);
+
+ if (test_flags & WAIT_SIGNALED)
+ sw_sync_timeline_inc(timeline, 1);
+
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobj, &point, 1,
+ 0, flags), expect);
+
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobj, &point, 1,
+ short_timeout(), flags), expect);
+
+ if (expect != -ETIME) {
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobj, &point, 1,
+ UINT64_MAX, flags), expect);
+ }
+
+ syncobj_destroy(fd, syncobj);
+ if (timeline != -1)
+ close(timeline);
+}
+
+static void
+test_wait_delayed_signal(int fd, uint32_t test_flags)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ uint32_t flags = flags_for_test_flags(test_flags);
+ uint64_t point = 1;
+ int timeline = -1;
+ timer_t timer;
+
+ if (test_flags & WAIT_FOR_SUBMIT) {
+ timer = syncobj_trigger_delayed(fd, syncobj, point, SHORT_TIME_NSEC);
+ } else {
+ timeline = syncobj_attach_sw_sync(fd, syncobj, point);
+ timer = set_timer(timeline_inc_func, NULL,
+ timeline, SHORT_TIME_NSEC);
+ }
+
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &point, 1,
+ gettime_ns() + SHORT_TIME_NSEC * 2,
+ flags, NULL));
+
+ timer_delete(timer);
+
+ if (timeline != -1)
+ close(timeline);
+
+ syncobj_destroy(fd, syncobj);
+}
+
+static void
+test_reset_unsignaled(int fd)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ uint64_t point = 1;
+
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobj, &point,
+ 1, 0, 0), -EINVAL);
+
+ syncobj_reset(fd, &syncobj, 1);
+
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobj, &point,
+ 1, 0, 0), -EINVAL);
+
+ syncobj_destroy(fd, syncobj);
+}
+
+static void
+test_reset_signaled(int fd)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ uint64_t point = 1;
+
+ syncobj_trigger(fd, syncobj, point);
+
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobj, &point,
+ 1, 0, 0), 0);
+
+ syncobj_reset(fd, &syncobj, 1);
+
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobj, &point,
+ 1, 0, 0), -EINVAL);
+
+ syncobj_destroy(fd, syncobj);
+}
+
+static void
+test_reset_multiple_signaled(int fd)
+{
+ uint64_t points[3] = {1, 1, 1};
+ uint32_t syncobjs[3];
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ syncobjs[i] = syncobj_create(fd, 0);
+ syncobj_trigger(fd, syncobjs[i], points[i]);
+ }
+
+ igt_assert_eq(syncobj_timeline_wait_err(fd, syncobjs, points, 3, 0, 0), 0);
+
+ syncobj_reset(fd, syncobjs, 3);
+
+ for (i = 0; i < 3; i++) {
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobjs[i],
+ &points[i], 1,
+ 0, 0), -EINVAL);
+ syncobj_destroy(fd, syncobjs[i]);
+ }
+}
+
+static void
+reset_and_trigger_func(union sigval sigval)
+{
+ struct fd_handle_pair *pair = sigval.sival_ptr;
+ syncobj_reset(pair->fd, &pair->handle, 1);
+ syncobj_trigger(pair->fd, pair->handle, pair->point);
+}
+
+static void
+test_reset_during_wait_for_submit(int fd)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ uint32_t flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
+ struct fd_handle_pair pair;
+ uint64_t point = 1;
+ timer_t timer;
+
+ pair.fd = fd;
+ pair.handle = syncobj;
+ timer = set_timer(reset_and_trigger_func, &pair, 0, SHORT_TIME_NSEC);
+
+ /* A reset should be a no-op even if we're in the middle of a wait */
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &point, 1,
+ gettime_ns() + SHORT_TIME_NSEC * 2,
+ flags, NULL));
+
+ timer_delete(timer);
+
+ syncobj_destroy(fd, syncobj);
+}
+
+static void
+test_signal(int fd)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ uint32_t flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
+ uint64_t point = 1;
+
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobj, &point,
+ 1, 0, 0), -EINVAL);
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &syncobj, &point,
+ 1, 0, flags), -ETIME);
+
+ syncobj_timeline_signal(fd, &syncobj, &point, 1);
+
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &point, 1, 0, 0, NULL));
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &point, 1, 0, flags, NULL));
+
+ syncobj_destroy(fd, syncobj);
+}
+
+static void
+test_multi_wait(int fd, uint32_t test_flags, int expect)
+{
+ uint32_t tflag, flags;
+ int i, fidx, timeline;
+ uint64_t points[5] = {
+ 1 + rand() % 1000,
+ 0, /* non timeline syncobj */
+ 1 + rand() % 1000,
+ 1 + rand() % 1000,
+ 0, /* non timeline syncobj */
+ };
+ uint32_t syncobjs[ARRAY_SIZE(points)];
+
+ for (i = 0; i < ARRAY_SIZE(points); i++)
+ syncobjs[i] = syncobj_create(fd, 0);
+
+ flags = flags_for_test_flags(test_flags);
+ test_flags &= ~(WAIT_ALL | WAIT_FOR_SUBMIT | WAIT_AVAILABLE);
+
+ for (i = 0; i < ARRAY_SIZE(points); i++) {
+ fidx = ffs(test_flags) - 1;
+ tflag = (1 << fidx);
+
+ if (test_flags & ~tflag)
+ test_flags &= ~tflag;
+
+ if (tflag & (WAIT_SUBMITTED | WAIT_SIGNALED)) {
+ timeline = syncobj_attach_sw_sync(fd, syncobjs[i],
+ points[i]);
+ }
+ if (tflag & WAIT_SIGNALED)
+ sw_sync_timeline_inc(timeline, 1);
+ }
+
+ igt_assert_eq(syncobj_timeline_wait_err(fd, syncobjs,
+ points, ARRAY_SIZE(points),
+ 0, flags), expect);
+
+ igt_assert_eq(syncobj_timeline_wait_err(fd, syncobjs,
+ points, ARRAY_SIZE(points),
+ short_timeout(),
+ flags), expect);
+
+ if (expect != -ETIME) {
+ igt_assert_eq(syncobj_timeline_wait_err(fd, syncobjs,
+ points, ARRAY_SIZE(points),
+ UINT64_MAX,
+ flags), expect);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(points); i++)
+ syncobj_destroy(fd, syncobjs[i]);
+}
+
+struct wait_thread_data {
+ int fd;
+ struct drm_syncobj_timeline_wait wait;
+};
+
+static void *
+wait_thread_func(void *data)
+{
+ struct wait_thread_data *wait = data;
+ igt_assert_eq(__syncobj_timeline_wait_ioctl(wait->fd, &wait->wait), 0);
+ return NULL;
+}
+
+static void
+test_wait_snapshot(int fd, uint32_t test_flags)
+{
+ struct wait_thread_data wait = {};
+ uint32_t syncobjs[2];
+ uint64_t points[2] = {1, 1};
+ int timelines[3] = { -1, -1, -1 };
+ pthread_t thread;
+
+ syncobjs[0] = syncobj_create(fd, 0);
+ syncobjs[1] = syncobj_create(fd, 0);
+
+ if (!(test_flags & WAIT_FOR_SUBMIT)) {
+ timelines[0] = syncobj_attach_sw_sync(fd, syncobjs[0], points[0]);
+ timelines[1] = syncobj_attach_sw_sync(fd, syncobjs[1], points[1]);
+ }
+
+ wait.fd = fd;
+ wait.wait.handles = to_user_pointer(syncobjs);
+ wait.wait.count_handles = 2;
+ wait.wait.points = to_user_pointer(points);
+ wait.wait.timeout_nsec = short_timeout();
+ wait.wait.flags = flags_for_test_flags(test_flags);
+
+ igt_assert_eq(pthread_create(&thread, NULL, wait_thread_func, &wait), 0);
+
+ sleep_nsec(SHORT_TIME_NSEC / 5);
+
+ /* Try to fake the kernel out by triggering or partially triggering
+ * the first fence.
+ */
+ if (test_flags & WAIT_ALL) {
+ /* If it's WAIT_ALL, actually trigger it */
+ if (timelines[0] == -1)
+ syncobj_trigger(fd, syncobjs[0], points[0]);
+ else
+ sw_sync_timeline_inc(timelines[0], 1);
+ } else if (test_flags & WAIT_FOR_SUBMIT) {
+ timelines[0] = syncobj_attach_sw_sync(fd, syncobjs[0], points[0]);
+ }
+
+ sleep_nsec(SHORT_TIME_NSEC / 5);
+
+ /* Then reset it */
+ syncobj_reset(fd, &syncobjs[0], 1);
+
+ sleep_nsec(SHORT_TIME_NSEC / 5);
+
+ /* Then "submit" it in a way that will never trigger. This way, if
+ * the kernel picks up on the new fence (it shouldn't), we'll get a
+ * timeout.
+ */
+ timelines[2] = syncobj_attach_sw_sync(fd, syncobjs[0], points[0]);
+
+ sleep_nsec(SHORT_TIME_NSEC / 5);
+
+ /* Now trigger the second fence to complete the wait */
+
+ if (timelines[1] == -1)
+ syncobj_trigger(fd, syncobjs[1], points[1]);
+ else
+ sw_sync_timeline_inc(timelines[1], 1);
+
+ pthread_join(thread, NULL);
+
+ if (!(test_flags & WAIT_ALL))
+ igt_assert_eq(wait.wait.first_signaled, 1);
+
+ close(timelines[0]);
+ close(timelines[1]);
+ close(timelines[2]);
+ syncobj_destroy(fd, syncobjs[0]);
+ syncobj_destroy(fd, syncobjs[1]);
+}
+
+/* The numbers 0-7, each repeated 4x and shuffled. */
+static const unsigned shuffled_0_7_x4[] = {
+ 2, 0, 6, 1, 1, 4, 5, 2, 0, 7, 1, 7, 6, 3, 4, 5,
+ 0, 2, 7, 3, 5, 4, 0, 6, 7, 3, 2, 5, 6, 1, 4, 3,
+};
+
+enum syncobj_stage {
+ STAGE_UNSUBMITTED,
+ STAGE_SUBMITTED,
+ STAGE_SIGNALED,
+ STAGE_RESET,
+ STAGE_RESUBMITTED,
+};
+
+static void
+test_wait_complex(int fd, uint32_t test_flags)
+{
+ struct wait_thread_data wait = {};
+ uint32_t syncobjs[8];
+ uint64_t points[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ enum syncobj_stage stage[8];
+ int i, j, timelines[8];
+ uint32_t first_signaled = -1, num_signaled = 0;
+ pthread_t thread;
+
+ for (i = 0; i < 8; i++) {
+ stage[i] = STAGE_UNSUBMITTED;
+ syncobjs[i] = syncobj_create(fd, 0);
+ }
+
+ if (test_flags & WAIT_FOR_SUBMIT) {
+ for (i = 0; i < 8; i++)
+ timelines[i] = -1;
+ } else {
+ for (i = 0; i < 8; i++)
+ timelines[i] = syncobj_attach_sw_sync(fd, syncobjs[i],
+ points[i]);
+ }
+
+ wait.fd = fd;
+ wait.wait.handles = to_user_pointer(syncobjs);
+ wait.wait.count_handles = 2;
+ wait.wait.points = to_user_pointer(points);
+ wait.wait.timeout_nsec = gettime_ns() + NSECS_PER_SEC;
+ wait.wait.flags = flags_for_test_flags(test_flags);
+
+ igt_assert_eq(pthread_create(&thread, NULL, wait_thread_func, &wait), 0);
+
+ sleep_nsec(NSECS_PER_SEC / 50);
+
+ num_signaled = 0;
+ for (j = 0; j < ARRAY_SIZE(shuffled_0_7_x4); j++) {
+ i = shuffled_0_7_x4[j];
+ igt_assert_lt(i, ARRAY_SIZE(syncobjs));
+
+ switch (stage[i]++) {
+ case STAGE_UNSUBMITTED:
+ /* We need to submit, i.e. attach, a fence */
+ if (!(test_flags & WAIT_FOR_SUBMIT)) {
+ /* We had to attach one up-front */
+ igt_assert_neq(timelines[i], -1);
+ break;
+ }
+ timelines[i] = syncobj_attach_sw_sync(fd, syncobjs[i],
+ points[i]);
+ break;
+
+ case STAGE_SUBMITTED:
+ /* We have a fence, trigger it */
+ igt_assert_neq(timelines[i], -1);
+ sw_sync_timeline_inc(timelines[i], 1);
+ close(timelines[i]);
+ timelines[i] = -1;
+ if (num_signaled == 0)
+ first_signaled = i;
+ num_signaled++;
+ break;
+
+ case STAGE_SIGNALED:
+ /* We're already signaled, reset */
+ syncobj_reset(fd, &syncobjs[i], 1);
+ break;
+
+ case STAGE_RESET:
+ /* We're reset, submit and don't signal */
+ timelines[i] = syncobj_attach_sw_sync(fd, syncobjs[i],
+ points[i]);
+ break;
+
+ case STAGE_RESUBMITTED:
+ igt_assert(!"Should not reach this stage");
+ break;
+ }
+
+ if (test_flags & WAIT_ALL) {
+ if (num_signaled == ARRAY_SIZE(syncobjs))
+ break;
+ } else {
+ if (num_signaled > 0)
+ break;
+ }
+
+ sleep_nsec(NSECS_PER_SEC / 100);
+ }
+
+ pthread_join(thread, NULL);
+
+ if (test_flags & WAIT_ALL) {
+ igt_assert_eq(num_signaled, ARRAY_SIZE(syncobjs));
+ } else {
+ igt_assert_eq(num_signaled, 1);
+ igt_assert_eq(wait.wait.first_signaled, first_signaled);
+ }
+
+ for (i = 0; i < 8; i++) {
+ close(timelines[i]);
+ syncobj_destroy(fd, syncobjs[i]);
+ }
+}
+
+static void
+test_wait_interrupted(int fd, uint32_t test_flags)
+{
+ struct drm_syncobj_timeline_wait wait = {};
+ uint32_t syncobj = syncobj_create(fd, 0);
+ uint64_t point = 1;
+ int timeline;
+
+ wait.handles = to_user_pointer(&syncobj);
+ wait.points = to_user_pointer(&point);
+ wait.count_handles = 1;
+ wait.flags = flags_for_test_flags(test_flags);
+
+ if (test_flags & WAIT_FOR_SUBMIT) {
+ wait.timeout_nsec = short_timeout();
+ igt_while_interruptible(true)
+ igt_assert_eq(__syncobj_timeline_wait_ioctl(fd, &wait), -ETIME);
+ }
+
+ timeline = syncobj_attach_sw_sync(fd, syncobj, point);
+
+ wait.timeout_nsec = short_timeout();
+ igt_while_interruptible(true)
+ igt_assert_eq(__syncobj_timeline_wait_ioctl(fd, &wait), -ETIME);
+
+ syncobj_destroy(fd, syncobj);
+ close(timeline);
+}
+
+static bool
+has_syncobj_timeline_wait(int fd)
+{
+ struct drm_syncobj_timeline_wait wait = {};
+ uint32_t handle = 0;
+ uint64_t value;
+ int ret;
+
+ if (drmGetCap(fd, DRM_CAP_SYNCOBJ_TIMELINE, &value))
+ return false;
+ if (!value)
+ return false;
+
+ /* Waiting on a single invalid (zero) handle should fail with ENOENT */
+ wait.count_handles = 1;
+ wait.handles = to_user_pointer(&handle);
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);
+ return ret == -1 && errno == ENOENT;
+}
+
+igt_main
+{
+ int fd = -1;
+
+ igt_fixture {
+ fd = drm_open_driver(DRIVER_ANY);
+ igt_require(has_syncobj_timeline_wait(fd));
+ igt_require_sw_sync();
+ }
+
+ igt_subtest("invalid-wait-bad-flags")
+ test_wait_bad_flags(fd);
+
+ igt_subtest("invalid-wait-zero-handles")
+ test_wait_zero_handles(fd);
+
+ igt_subtest("invalid-wait-illegal-handle")
+ test_wait_illegal_handle(fd);
+
+ igt_subtest("invalid-query-zero-handles")
+ test_query_zero_handles(fd);
+
+ igt_subtest("invalid-query-illegal-handle")
+ test_query_illegal_handle(fd);
+
+ igt_subtest("invalid-query-one-illegal-handle")
+ test_query_one_illegal_handle(fd);
+
+ igt_subtest("invalid-query-bad-pad")
+ test_query_bad_pad(fd);
+
+ igt_subtest("invalid-signal-zero-handles")
+ test_signal_zero_handles(fd);
+
+ igt_subtest("invalid-signal-illegal-handle")
+ test_signal_illegal_handle(fd);
+
+ igt_subtest("invalid-signal-illegal-point")
+ test_signal_illegal_point(fd);
+
+ igt_subtest("invalid-signal-one-illegal-handle")
+ test_signal_one_illegal_handle(fd);
+
+ igt_subtest("invalid-signal-bad-pad")
+ test_signal_bad_pad(fd);
+
+ igt_subtest("invalid-signal-array")
+ test_signal_array(fd);
+
+ igt_subtest("invalid-transfer-illegal-handle")
+ test_transfer_illegal_handle(fd);
+
+ igt_subtest("invalid-transfer-bad-pad")
+ test_transfer_bad_pad(fd);
+
+ for (unsigned flags = 0; flags < WAIT_FLAGS_MAX; flags++) {
+ int err;
+
+ /* Only one wait mode for single-wait tests */
+ if (__builtin_popcount(flags & (WAIT_UNSUBMITTED |
+ WAIT_SUBMITTED |
+ WAIT_SIGNALED)) != 1)
+ continue;
+
+ if ((flags & WAIT_UNSUBMITTED) && !(flags & WAIT_FOR_SUBMIT))
+ err = -EINVAL;
+ else if (!(flags & WAIT_SIGNALED) && !((flags & WAIT_SUBMITTED) && (flags & WAIT_AVAILABLE)))
+ err = -ETIME;
+ else
+ err = 0;
+
+ igt_subtest_f("%ssingle-wait%s%s%s%s%s%s",
+ err == -EINVAL ? "invalid-" : err == -ETIME ? "etime-" : "",
+ (flags & WAIT_ALL) ? "-all" : "",
+ (flags & WAIT_FOR_SUBMIT) ? "-for-submit" : "",
+ (flags & WAIT_AVAILABLE) ? "-available" : "",
+ (flags & WAIT_UNSUBMITTED) ? "-unsubmitted" : "",
+ (flags & WAIT_SUBMITTED) ? "-submitted" : "",
+ (flags & WAIT_SIGNALED) ? "-signaled" : "")
+ test_single_wait(fd, flags, err);
+ }
+
+ igt_subtest("wait-delayed-signal")
+ test_wait_delayed_signal(fd, 0);
+
+ igt_subtest("wait-for-submit-delayed-submit")
+ test_wait_delayed_signal(fd, WAIT_FOR_SUBMIT);
+
+ igt_subtest("wait-all-delayed-signal")
+ test_wait_delayed_signal(fd, WAIT_ALL);
+
+ igt_subtest("wait-all-for-submit-delayed-submit")
+ test_wait_delayed_signal(fd, WAIT_ALL | WAIT_FOR_SUBMIT);
+
+ igt_subtest("reset-unsignaled")
+ test_reset_unsignaled(fd);
+
+ igt_subtest("reset-signaled")
+ test_reset_signaled(fd);
+
+ igt_subtest("reset-multiple-signaled")
+ test_reset_multiple_signaled(fd);
+
+ igt_subtest("reset-during-wait-for-submit")
+ test_reset_during_wait_for_submit(fd);
+
+ igt_subtest("signal")
+ test_signal(fd);
+
+ for (unsigned flags = 0; flags < WAIT_FLAGS_MAX; flags++) {
+ int err;
+
+ /* At least one wait mode for multi-wait tests */
+ if (!(flags & (WAIT_UNSUBMITTED |
+ WAIT_SUBMITTED |
+ WAIT_SIGNALED)))
+ continue;
+
+ err = 0;
+ if ((flags & WAIT_UNSUBMITTED) && !(flags & WAIT_FOR_SUBMIT)) {
+ err = -EINVAL;
+ } else if (flags & WAIT_ALL) {
+ if (flags & (WAIT_UNSUBMITTED | WAIT_SUBMITTED))
+ err = -ETIME;
+ if (!(flags & WAIT_UNSUBMITTED) && (flags & WAIT_SUBMITTED) && (flags & WAIT_AVAILABLE))
+ err = 0;
+ } else {
+ if (!(flags & WAIT_SIGNALED) && !((flags & WAIT_SUBMITTED) && (flags & WAIT_AVAILABLE)))
+ err = -ETIME;
+ }
+
+ igt_subtest_f("%smulti-wait%s%s%s%s%s%s",
+ err == -EINVAL ? "invalid-" : err == -ETIME ? "etime-" : "",
+ (flags & WAIT_ALL) ? "-all" : "",
+ (flags & WAIT_FOR_SUBMIT) ? "-for-submit" : "",
+ (flags & WAIT_AVAILABLE) ? "-available" : "",
+ (flags & WAIT_UNSUBMITTED) ? "-unsubmitted" : "",
+ (flags & WAIT_SUBMITTED) ? "-submitted" : "",
+ (flags & WAIT_SIGNALED) ? "-signaled" : "")
+ test_multi_wait(fd, flags, err);
+ }
+ igt_subtest("wait-any-snapshot")
+ test_wait_snapshot(fd, 0);
+
+ igt_subtest("wait-all-snapshot")
+ test_wait_snapshot(fd, WAIT_ALL);
+
+ igt_subtest("wait-for-submit-snapshot")
+ test_wait_snapshot(fd, WAIT_FOR_SUBMIT);
+
+ igt_subtest("wait-all-for-submit-snapshot")
+ test_wait_snapshot(fd, WAIT_ALL | WAIT_FOR_SUBMIT);
+
+ igt_subtest("wait-any-complex")
+ test_wait_complex(fd, 0);
+
+ igt_subtest("wait-all-complex")
+ test_wait_complex(fd, WAIT_ALL);
+
+ igt_subtest("wait-for-submit-complex")
+ test_wait_complex(fd, WAIT_FOR_SUBMIT);
+
+ igt_subtest("wait-all-for-submit-complex")
+ test_wait_complex(fd, WAIT_ALL | WAIT_FOR_SUBMIT);
+
+ igt_subtest("wait-any-interrupted")
+ test_wait_interrupted(fd, 0);
+
+ igt_subtest("wait-all-interrupted")
+ test_wait_interrupted(fd, WAIT_ALL);
+}
--
2.22.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 21+ messages in thread* Re: [igt-dev] [PATCH i-g-t 06/10] igt: add timeline test cases
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 06/10] igt: add timeline test cases Lionel Landwerlin
@ 2019-07-26 2:56 ` zhoucm1
2019-07-26 9:07 ` Lionel Landwerlin
0 siblings, 1 reply; 21+ messages in thread
From: zhoucm1 @ 2019-07-26 2:56 UTC (permalink / raw)
To: Lionel Landwerlin, igt-dev; +Cc: Chunming Zhou
On 2019年07月25日 18:30, Lionel Landwerlin wrote:
> +static void
> +test_query_bad_pad(int fd)
> +{
> + struct drm_syncobj_timeline_array array = {};
> + uint32_t handle = 0;
> + int ret;
> +
> + array.pad = 0xdeadbeef;
> + array.count_handles = 1;
> + array.handles = to_user_pointer(&handle);
> + ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &array);
> + igt_assert(ret == -1 && errno == EINVAL);
> +}
We need remove this testing case as we extend this pad to be flags.
-David
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 21+ messages in thread* Re: [igt-dev] [PATCH i-g-t 06/10] igt: add timeline test cases
2019-07-26 2:56 ` zhoucm1
@ 2019-07-26 9:07 ` Lionel Landwerlin
0 siblings, 0 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-26 9:07 UTC (permalink / raw)
To: zhoucm1, igt-dev; +Cc: Chunming Zhou
On 26/07/2019 05:56, zhoucm1 wrote:
>
>
> On 2019年07月25日 18:30, Lionel Landwerlin wrote:
>> +static void
>> +test_query_bad_pad(int fd)
>> +{
>> + struct drm_syncobj_timeline_array array = {};
>> + uint32_t handle = 0;
>> + int ret;
>> +
>> + array.pad = 0xdeadbeef;
>> + array.count_handles = 1;
>> + array.handles = to_user_pointer(&handle);
>> + ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &array);
>> + igt_assert(ret == -1 && errno == EINVAL);
>> +}
> We need remove this testing case as we extend this pad to be flags.
>
> -David
>
Thanks, will do once it's landed.
-Lionel
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 21+ messages in thread
* [igt-dev] [PATCH i-g-t 07/10] tests/syncobj_timeline: add more timeline tests
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
` (5 preceding siblings ...)
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 06/10] igt: add timeline test cases Lionel Landwerlin
@ 2019-07-25 10:30 ` Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 08/10] tests/i915/exec_fence: switch to internal headers Lionel Landwerlin
` (5 subsequent siblings)
12 siblings, 0 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:30 UTC (permalink / raw)
To: igt-dev
Including ordering tests and 32bit limit.
v2: add point 0 signaling test (Lionel)
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
lib/igt_syncobj.c | 73 ++++----
lib/igt_syncobj.h | 3 +
tests/syncobj_timeline.c | 374 +++++++++++++++++++++++++++++++++++++++
3 files changed, 413 insertions(+), 37 deletions(-)
diff --git a/lib/igt_syncobj.c b/lib/igt_syncobj.c
index 318078f6..3e92eb76 100644
--- a/lib/igt_syncobj.c
+++ b/lib/igt_syncobj.c
@@ -438,17 +438,19 @@ syncobj_timeline_query(int fd, uint32_t *handles, uint64_t *points,
}
static int
-__syncobj_binary_to_timeline(int fd, uint32_t timeline_handle,
- uint64_t point, uint32_t binary_handle)
+__syncobj_transfer(int fd,
+ uint32_t handle_dst, uint64_t point_dst,
+ uint32_t handle_src, uint64_t point_src,
+ uint32_t flags)
{
struct drm_syncobj_transfer args;
int ret;
- args.src_handle = binary_handle;
- args.dst_handle = timeline_handle;
- args.src_point = 0;
- args.dst_point = point;
- args.flags = 0;
+ args.src_handle = handle_src;
+ args.dst_handle = handle_dst;
+ args.src_point = point_src;
+ args.dst_point = point_dst;
+ args.flags = flags;
args.pad = 0;
ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &args);
if (ret) {
@@ -474,33 +476,9 @@ void
syncobj_binary_to_timeline(int fd, uint32_t timeline_handle,
uint64_t point, uint32_t binary_handle)
{
- igt_assert_eq(__syncobj_binary_to_timeline(fd, timeline_handle, point,
- binary_handle), 0);
-}
-
-static int
-__syncobj_timeline_to_binary(int fd, uint32_t binary_handle,
- uint32_t timeline_handle,
- uint64_t point,
- uint32_t flags)
-{
- struct drm_syncobj_transfer args;
- int ret;
-
- args.dst_handle = binary_handle;
- args.src_handle = timeline_handle;
- args.dst_point = 0;
- args.src_point = point;
- args.flags = flags;
- args.pad = 0;
- ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &args);
- if (ret) {
- ret = -errno;
- igt_assert(ret);
- }
-
- errno = 0;
- return ret;
+ igt_assert_eq(__syncobj_transfer(fd,
+ timeline_handle, point,
+ binary_handle, 0, 0), 0);
}
/**
@@ -519,7 +497,28 @@ syncobj_timeline_to_binary(int fd, uint32_t binary_handle,
uint64_t point,
uint32_t flags)
{
- igt_assert_eq(__syncobj_timeline_to_binary(fd, binary_handle,
- timeline_handle, point,
- flags), 0);
+ igt_assert_eq(__syncobj_transfer(fd,
+ binary_handle, 0,
+ timeline_handle, point,
+ flags), 0);
+}
+
+/**
+ * syncobj_timeline_to_timeline:
+ * @fd: The DRM file descriptor.
+ * @timeline_dst: A destination timeline syncobj handle
+ * @point_dst: A point on the destination timeline syncobj
+ * @timeline_src: A source timeline syncobj handle
+ * @point_src: A point on the source timeline syncobj
+ *
+ * Transfers the fence at @point_src on @timeline_src to @point_dst on
+ * @timeline_dst.
+ */
+void
+syncobj_timeline_to_timeline(int fd,
+ uint64_t timeline_dst, uint32_t point_dst,
+ uint64_t timeline_src, uint32_t point_src)
+{
+ igt_assert_eq(__syncobj_transfer(fd,
+ timeline_dst, point_dst,
+ timeline_src, point_src, 0), 0);
}
diff --git a/lib/igt_syncobj.h b/lib/igt_syncobj.h
index 20f1f18f..e6725671 100644
--- a/lib/igt_syncobj.h
+++ b/lib/igt_syncobj.h
@@ -60,6 +60,9 @@ void syncobj_timeline_to_binary(int fd, uint32_t binary_handle,
uint32_t timeline_handle,
uint64_t point,
uint32_t flags);
+void syncobj_timeline_to_timeline(int fd,
+ uint64_t timeline_dst, uint32_t point_dst,
+ uint64_t timeline_src, uint32_t point_src);
void syncobj_timeline_signal(int fd, uint32_t *handles, uint64_t *points,
uint32_t count);
diff --git a/tests/syncobj_timeline.c b/tests/syncobj_timeline.c
index 7fd602de..04e88d55 100644
--- a/tests/syncobj_timeline.c
+++ b/tests/syncobj_timeline.c
@@ -369,6 +369,26 @@ test_transfer_bad_pad(int fd)
igt_assert(ret == -1 && errno == EINVAL);
}
+static void
+test_transfer_nonexistent_point(int fd)
+{
+ struct drm_syncobj_transfer arg = {};
+ uint32_t handle = syncobj_create(fd, 0);
+ uint64_t value = 63;
+ int ret;
+
+ syncobj_timeline_signal(fd, &handle, &value, 1);
+
+ arg.src_handle = handle;
+ arg.dst_handle = handle;
+ arg.src_point = value; /* Point doesn't exist */
+ arg.dst_point = value + 11;
+ ret = igt_ioctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &arg);
+ igt_assert(ret == 0);
+
+ syncobj_destroy(fd, handle);
+}
+
#define WAIT_FOR_SUBMIT (1 << 0)
#define WAIT_ALL (1 << 1)
#define WAIT_AVAILABLE (1 << 2)
@@ -377,6 +397,50 @@ test_transfer_bad_pad(int fd)
#define WAIT_SIGNALED (1 << 5)
#define WAIT_FLAGS_MAX (1 << 6) - 1
+static void
+test_transfer_point(int fd)
+{
+ int timeline = sw_sync_timeline_create();
+ uint32_t handle = syncobj_create(fd, 0);
+ uint64_t value;
+
+ {
+ int sw_fence = sw_sync_timeline_create_fence(timeline, 1);
+ uint32_t tmp_syncobj = syncobj_create(fd, 0);
+
+ syncobj_import_sync_file(fd, tmp_syncobj, sw_fence);
+ syncobj_binary_to_timeline(fd, handle, 1, tmp_syncobj);
+ close(sw_fence);
+ syncobj_destroy(fd, tmp_syncobj);
+ }
+
+ syncobj_timeline_query(fd, &handle, &value, 1);
+ igt_assert_eq(value, 0);
+
+ value = 1;
+ igt_assert_eq(syncobj_timeline_wait_err(fd, &handle, &value,
+ 1, 0, WAIT_ALL), -ETIME);
+
+ sw_sync_timeline_inc(timeline, 1);
+
+ syncobj_timeline_query(fd, &handle, &value, 1);
+ igt_assert_eq(value, 1);
+
+ igt_assert(syncobj_timeline_wait(fd, &handle, &value,
+ 1, 0, WAIT_ALL, NULL));
+
+ value = 2;
+ syncobj_timeline_signal(fd, &handle, &value, 1);
+
+ syncobj_timeline_to_timeline(fd, handle, 3, handle, 2);
+
+ syncobj_timeline_query(fd, &handle, &value, 1);
+ igt_assert_eq(value, 3);
+
+ syncobj_destroy(fd, handle);
+ close(timeline);
+}
+
static uint32_t
flags_for_test_flags(uint32_t test_flags)
{
@@ -564,6 +628,21 @@ test_signal(int fd)
syncobj_destroy(fd, syncobj);
}
+static void
+test_signal_point_0(int fd)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ uint32_t flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
+ uint64_t point = 0;
+
+ syncobj_timeline_signal(fd, &syncobj, &point, 1);
+
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &point, 1, 0, 0, NULL));
+ igt_assert(syncobj_wait(fd, &syncobj, 1, 0, flags, NULL));
+
+ syncobj_destroy(fd, syncobj);
+}
+
static void
test_multi_wait(int fd, uint32_t test_flags, int expect)
{
@@ -856,6 +935,277 @@ test_wait_interrupted(int fd, uint32_t test_flags)
close(timeline);
}
+/*
+ * Verifies that as we signal points from the host, the syncobj
+ * timeline value increments and that waits for submits/signals works
+ * properly.
+ */
+static void
+test_host_signal_points(int fd)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ uint64_t value = 0;
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ uint64_t query_value = 0;
+
+ value += rand();
+
+ syncobj_timeline_signal(fd, &syncobj, &value, 1);
+
+ syncobj_timeline_query(fd, &syncobj, &query_value, 1);
+ igt_assert_eq(query_value, value);
+
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &query_value,
+ 1, 0, WAIT_FOR_SUBMIT, NULL));
+
+ query_value -= 1;
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &query_value,
+ 1, 0, WAIT_ALL, NULL));
+ }
+
+ syncobj_destroy(fd, syncobj);
+}
+
+/*
+ * Verifies that a device signaling fences out of order on the
+ * timeline still increments the timeline monotonically and that waits
+ * work properly.
+ */
+static void
+test_device_signal_unordered(int fd)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ int point_indices[] = { 0, 2, 1, 4, 3 };
+ bool signaled[ARRAY_SIZE(point_indices)] = {};
+ int fences[ARRAY_SIZE(point_indices)];
+ int timeline = sw_sync_timeline_create();
+ uint64_t value = 0;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(fences); i++) {
+ fences[point_indices[i]] = sw_sync_timeline_create_fence(timeline, i + 1);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fences); i++) {
+ uint32_t tmp_syncobj = syncobj_create(fd, 0);
+
+ syncobj_import_sync_file(fd, tmp_syncobj, fences[i]);
+ syncobj_binary_to_timeline(fd, syncobj, i + 1, tmp_syncobj);
+ syncobj_destroy(fd, tmp_syncobj);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fences); i++) {
+ uint64_t query_value = 0;
+ uint64_t min_value = 0;
+
+ sw_sync_timeline_inc(timeline, 1);
+
+ signaled[point_indices[i]] = true;
+
+ /*
+ * Compute a minimum value of the timeline based on
+ * the smallest signaled point.
+ */
+ for (j = 0; j < ARRAY_SIZE(signaled); j++) {
+ if (!signaled[j])
+ break;
+ min_value = j;
+ }
+
+ syncobj_timeline_query(fd, &syncobj, &query_value, 1);
+ igt_assert(query_value >= min_value);
+ igt_assert(query_value >= value);
+
+ igt_debug("signaling point %i, timeline value = %" PRIu64 "\n",
+ point_indices[i] + 1, query_value);
+
+ value = max(query_value, value);
+
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &query_value,
+ 1, 0, WAIT_FOR_SUBMIT, NULL));
+
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &query_value,
+ 1, 0, WAIT_ALL, NULL));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fences); i++)
+ close(fences[i]);
+
+ syncobj_destroy(fd, syncobj);
+ close(timeline);
+}
+
+/*
+ * Verifies that submitting out of order doesn't break the timeline.
+ */
+static void
+test_device_submit_unordered(int fd)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ uint64_t points[] = { 1, 5, 3, 6, 7 };
+ int timeline = sw_sync_timeline_create();
+ uint64_t query_value;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(points); i++) {
+ int fence = sw_sync_timeline_create_fence(timeline, i + 1);
+ uint32_t tmp_syncobj = syncobj_create(fd, 0);
+
+ syncobj_import_sync_file(fd, tmp_syncobj, fence);
+ syncobj_binary_to_timeline(fd, syncobj, points[i], tmp_syncobj);
+ close(fence);
+ syncobj_destroy(fd, tmp_syncobj);
+ }
+
+ /*
+ * Signal points 1, 5 & 3. There are no other points <= 5 so
+ * waiting on 5 should return immediately for submission &
+ * signaling.
+ */
+ sw_sync_timeline_inc(timeline, 3);
+
+ syncobj_timeline_query(fd, &syncobj, &query_value, 1);
+ igt_assert_eq(query_value, 5);
+
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &query_value,
+ 1, 0, WAIT_FOR_SUBMIT, NULL));
+
+ igt_assert(syncobj_timeline_wait(fd, &syncobj, &query_value,
+ 1, 0, WAIT_ALL, NULL));
+
+ syncobj_destroy(fd, syncobj);
+ close(timeline);
+}
+
+/*
+ * Verifies that the host signaling fences out of order on the
+ * timeline still increments the timeline monotonically and that waits
+ * work properly.
+ */
+static void
+test_host_signal_ordered(int fd)
+{
+ uint32_t syncobj = syncobj_create(fd, 0);
+ int timeline = sw_sync_timeline_create();
+ uint64_t host_signal_value = 8, query_value;
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ int fence = sw_sync_timeline_create_fence(timeline, i + 1);
+ uint32_t tmp_syncobj = syncobj_create(fd, 0);
+
+ syncobj_import_sync_file(fd, tmp_syncobj, fence);
+ syncobj_binary_to_timeline(fd, syncobj, i + 1, tmp_syncobj);
+ syncobj_destroy(fd, tmp_syncobj);
+ close(fence);
+ }
+
+ sw_sync_timeline_inc(timeline, 3);
+
+ syncobj_timeline_query(fd, &syncobj, &query_value, 1);
+ igt_assert_eq(query_value, 3);
+
+ syncobj_timeline_signal(fd, &syncobj, &host_signal_value, 1);
+
+ syncobj_timeline_query(fd, &syncobj, &query_value, 1);
+ igt_assert_eq(query_value, 3);
+
+ sw_sync_timeline_inc(timeline, 5);
+
+ syncobj_timeline_query(fd, &syncobj, &query_value, 1);
+ igt_assert_eq(query_value, 8);
+
+ syncobj_destroy(fd, syncobj);
+ close(timeline);
+}
+
+struct checker_thread_data {
+ int fd;
+ uint32_t syncobj;
+ bool running;
+ bool started;
+};
+
+static void *
+checker_thread_func(void *_data)
+{
+ struct checker_thread_data *data = _data;
+ uint64_t value, last_value = 0;
+
+ while (data->running) {
+ syncobj_timeline_query(data->fd, &data->syncobj, &value, 1);
+
+ data->started = true;
+
+ igt_assert(last_value <= value);
+ last_value = value;
+ }
+
+ return NULL;
+}
+
+/*
+ * Verifies that signaling works around the int32_t limit. For
+ * compatibility reasons, the handling of seqnos in the dma-fences can
+ * consider seqnoA to be prior to seqnoB even though seqnoA > seqnoB.
+ * We ran into that issue and it was fixed in:
+ *
+ * commit b312d8ca3a7cebe19941d969a51f2b7f899b81e2
+ * Author: Christian König <christian.koenig@amd.com>
+ * Date: Wed Nov 14 16:11:06 2018 +0100
+ *
+ * dma-buf: make fence sequence numbers 64 bit v2
+ *
+ */
+static void
+test_32bits_limit(int fd)
+{
+ struct checker_thread_data thread_data = {
+ .fd = fd,
+ .syncobj = syncobj_create(fd, 0),
+ .running = true,
+ .started = false,
+ };
+ int timeline = sw_sync_timeline_create();
+ uint64_t limit_diff = (1ull << 31) - 1;
+ uint64_t points[] = { 1, 5, limit_diff + 5, limit_diff + 6, limit_diff * 2, };
+ pthread_t thread;
+ uint64_t value, last_value;
+ int i;
+
+ igt_assert_eq(pthread_create(&thread, NULL, checker_thread_func, &thread_data), 0);
+
+ while (!thread_data.started);
+
+ for (i = 0; i < ARRAY_SIZE(points); i++) {
+ int fence = sw_sync_timeline_create_fence(timeline, i + 1);
+ uint32_t tmp_syncobj = syncobj_create(fd, 0);
+
+ syncobj_import_sync_file(fd, tmp_syncobj, fence);
+ syncobj_binary_to_timeline(fd, thread_data.syncobj, points[i], tmp_syncobj);
+ close(fence);
+ syncobj_destroy(fd, tmp_syncobj);
+ }
+
+ last_value = 0;
+ for (i = 0; i < ARRAY_SIZE(points); i++) {
+ sw_sync_timeline_inc(timeline, 1);
+
+ syncobj_timeline_query(fd, &thread_data.syncobj, &value, 1);
+ igt_assert(last_value <= value);
+
+ last_value = value;
+ }
+
+ thread_data.running = false;
+ pthread_join(thread, NULL);
+
+ syncobj_destroy(fd, thread_data.syncobj);
+ close(timeline);
+}
+
static bool
has_syncobj_timeline_wait(int fd)
{
@@ -931,6 +1281,12 @@ igt_main
igt_subtest("invalid-transfer-bad-pad")
test_transfer_bad_pad(fd);
+ igt_subtest("invalid-transfer-non-existent-point")
+ test_transfer_nonexistent_point(fd);
+
+ igt_subtest("transfer-timeline-point")
+ test_transfer_point(fd);
+
for (unsigned flags = 0; flags < WAIT_FLAGS_MAX; flags++) {
int err;
@@ -985,6 +1341,9 @@ igt_main
igt_subtest("signal")
test_signal(fd);
+ igt_subtest("signal-point-0")
+ test_signal_point_0(fd);
+
for (unsigned flags = 0; flags < WAIT_FLAGS_MAX; flags++) {
int err;
@@ -1046,4 +1405,19 @@ igt_main
igt_subtest("wait-all-interrupted")
test_wait_interrupted(fd, WAIT_ALL);
+
+ igt_subtest("host-signal-points")
+ test_host_signal_points(fd);
+
+ igt_subtest("device-signal-unordered")
+ test_device_signal_unordered(fd);
+
+ igt_subtest("device-submit-unordered")
+ test_device_submit_unordered(fd);
+
+ igt_subtest("host-signal-ordered")
+ test_host_signal_ordered(fd);
+
+ igt_subtest("32bits-limit")
+ test_32bits_limit(fd);
}
--
2.22.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 21+ messages in thread* [igt-dev] [PATCH i-g-t 08/10] tests/i915/exec_fence: switch to internal headers
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
` (6 preceding siblings ...)
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 07/10] tests/syncobj_timeline: add more timeline tests Lionel Landwerlin
@ 2019-07-25 10:30 ` Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 09/10] tests/i915/exec_fence: reuse syncobj helpers Lionel Landwerlin
` (4 subsequent siblings)
12 siblings, 0 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:30 UTC (permalink / raw)
To: igt-dev
Drop local defines, etc.
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
tests/i915/gem_exec_fence.c | 122 ++++++++++++++++--------------------
1 file changed, 55 insertions(+), 67 deletions(-)
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index 0befb54f..f8c0da7c 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -33,18 +33,6 @@
IGT_TEST_DESCRIPTION("Check that execbuf waits for explicit fences");
-#define LOCAL_EXEC_FENCE_IN (1 << 16)
-#define LOCAL_EXEC_FENCE_OUT (1 << 17)
-#define LOCAL_EXEC_FENCE_SUBMIT (1 << 20)
-
-#define LOCAL_EXEC_FENCE_ARRAY (1 << 19)
-struct local_gem_exec_fence {
- uint32_t handle;
- uint32_t flags;
-#define LOCAL_EXEC_FENCE_WAIT (1 << 0)
-#define LOCAL_EXEC_FENCE_SIGNAL (1 << 1)
-};
-
#ifndef SYNC_IOC_MERGE
struct sync_merge_data {
char name[32];
@@ -71,7 +59,7 @@ static void store(int fd, unsigned ring, int fence, uint32_t target, unsigned of
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
- execbuf.flags = ring | LOCAL_EXEC_FENCE_IN;
+ execbuf.flags = ring | I915_EXEC_FENCE_IN;
execbuf.rsvd2 = fence;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
@@ -134,7 +122,7 @@ static void test_fence_busy(int fd, unsigned ring, unsigned flags)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = ring | LOCAL_EXEC_FENCE_OUT;
+ execbuf.flags = ring | I915_EXEC_FENCE_OUT;
memset(&obj, 0, sizeof(obj));
obj.handle = gem_create(fd, 4096);
@@ -266,7 +254,7 @@ static void test_fence_busy_all(int fd, unsigned flags)
for_each_engine(fd, engine) {
int fence, new;
- execbuf.flags = engine | LOCAL_EXEC_FENCE_OUT;
+ execbuf.flags = engine | I915_EXEC_FENCE_OUT;
execbuf.rsvd2 = -1;
gem_execbuf_wr(fd, &execbuf);
fence = execbuf.rsvd2 >> 32;
@@ -428,7 +416,7 @@ static void test_parallel(int fd, unsigned int master)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
- execbuf.flags = master | LOCAL_EXEC_FENCE_OUT;
+ execbuf.flags = master | I915_EXEC_FENCE_OUT;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
@@ -501,7 +489,7 @@ static void test_parallel(int fd, unsigned int master)
if (engine == master)
continue;
- execbuf.flags = engine | LOCAL_EXEC_FENCE_SUBMIT;
+ execbuf.flags = engine | I915_EXEC_FENCE_SUBMIT;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
@@ -597,7 +585,7 @@ static void test_keep_in_fence(int fd, unsigned int engine, unsigned int flags)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
- .flags = engine | LOCAL_EXEC_FENCE_OUT,
+ .flags = engine | I915_EXEC_FENCE_OUT,
};
unsigned long count, last;
struct itimerval itv;
@@ -616,7 +604,7 @@ static void test_keep_in_fence(int fd, unsigned int engine, unsigned int flags)
itv.it_value.tv_usec = 10000;
setitimer(ITIMER_REAL, &itv, NULL);
- execbuf.flags |= LOCAL_EXEC_FENCE_IN;
+ execbuf.flags |= I915_EXEC_FENCE_IN;
execbuf.rsvd2 = fence;
last = -1;
@@ -686,7 +674,7 @@ static void test_long_history(int fd, long ring_size, unsigned flags)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj[1]);
execbuf.buffer_count = 1;
- execbuf.flags = LOCAL_EXEC_FENCE_OUT;
+ execbuf.flags = I915_EXEC_FENCE_OUT;
gem_execbuf_wr(fd, &execbuf);
all_fences = execbuf.rsvd2 >> 32;
@@ -702,7 +690,7 @@ static void test_long_history(int fd, long ring_size, unsigned flags)
for (n = 0; n < nengine; n++) {
struct sync_merge_data merge;
- execbuf.flags = engines[n] | LOCAL_EXEC_FENCE_OUT;
+ execbuf.flags = engines[n] | I915_EXEC_FENCE_OUT;
if (__gem_execbuf_wr(fd, &execbuf))
continue;
@@ -736,7 +724,7 @@ static void test_long_history(int fd, long ring_size, unsigned flags)
for (s = 0; s < ring_size; s++) {
for (n = 0; n < nengine; n++) {
- execbuf.flags = engines[n] | LOCAL_EXEC_FENCE_IN;
+ execbuf.flags = engines[n] | I915_EXEC_FENCE_IN;
if (__gem_execbuf_wr(fd, &execbuf))
continue;
}
@@ -760,7 +748,7 @@ static bool has_submit_fence(int fd)
int value = 0;
memset(&gp, 0, sizeof(gp));
- gp.param = 0xdeadbeef ^ 51; /* I915_PARAM_HAS_EXEC_SUBMIT_FENCE */
+ gp.param = 0xdeadbeef ^ I915_PARAM_HAS_EXEC_SUBMIT_FENCE;
gp.value = &value;
ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
@@ -771,7 +759,7 @@ static bool has_submit_fence(int fd)
static bool has_syncobj(int fd)
{
- struct drm_get_cap cap = { .capability = 0x13 };
+ struct drm_get_cap cap = { .capability = DRM_CAP_SYNCOBJ };
ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
return cap.value;
}
@@ -782,7 +770,7 @@ static bool exec_has_fence_array(int fd)
int value = 0;
memset(&gp, 0, sizeof(gp));
- gp.param = 49; /* I915_PARAM_HAS_EXEC_FENCE_ARRAY */
+ gp.param = I915_PARAM_HAS_EXEC_FENCE_ARRAY;
gp.value = &value;
ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
@@ -796,7 +784,7 @@ static void test_invalid_fence_array(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj;
- struct local_gem_exec_fence fence;
+ struct drm_i915_gem_exec_fence fence;
void *ptr;
/* create an otherwise valid execbuf */
@@ -808,7 +796,7 @@ static void test_invalid_fence_array(int fd)
execbuf.buffer_count = 1;
gem_execbuf(fd, &execbuf);
- execbuf.flags |= LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags |= I915_EXEC_FENCE_ARRAY;
gem_execbuf(fd, &execbuf);
/* Now add a few invalid fence-array pointers */
@@ -1020,7 +1008,7 @@ static void test_syncobj_unused_fence(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
- struct local_gem_exec_fence fence = {
+ struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
igt_spin_t *spin = igt_spin_new(fd);
@@ -1031,7 +1019,7 @@ static void test_syncobj_unused_fence(int fd)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
@@ -1056,14 +1044,14 @@ static void test_syncobj_invalid_wait(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
- struct local_gem_exec_fence fence = {
+ struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
@@ -1072,7 +1060,7 @@ static void test_syncobj_invalid_wait(int fd)
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
/* waiting before the fence is set is invalid */
- fence.flags = LOCAL_EXEC_FENCE_WAIT;
+ fence.flags = I915_EXEC_FENCE_WAIT;
igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
gem_close(fd, obj.handle);
@@ -1084,14 +1072,14 @@ static void test_syncobj_invalid_flags(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
- struct local_gem_exec_fence fence = {
+ struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
@@ -1112,7 +1100,7 @@ static void test_syncobj_signal(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
- struct local_gem_exec_fence fence = {
+ struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
igt_spin_t *spin = igt_spin_new(fd);
@@ -1122,7 +1110,7 @@ static void test_syncobj_signal(int fd)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
@@ -1130,7 +1118,7 @@ static void test_syncobj_signal(int fd)
obj.handle = gem_create(fd, 4096);
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
- fence.flags = LOCAL_EXEC_FENCE_SIGNAL;
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
gem_execbuf(fd, &execbuf);
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -1151,7 +1139,7 @@ static void test_syncobj_wait(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
- struct local_gem_exec_fence fence = {
+ struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
igt_spin_t *spin;
@@ -1176,10 +1164,10 @@ static void test_syncobj_wait(int fd)
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
/* Queue a signaler from the blocked engine */
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
- fence.flags = LOCAL_EXEC_FENCE_SIGNAL;
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
gem_execbuf(fd, &execbuf);
igt_assert(gem_bo_busy(fd, spin->handle));
@@ -1204,10 +1192,10 @@ static void test_syncobj_wait(int fd)
igt_assert(gem_bo_busy(fd, spin->handle));
/* Now wait upon the blocked engine */
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY | engine;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY | engine;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
- fence.flags = LOCAL_EXEC_FENCE_WAIT;
+ fence.flags = I915_EXEC_FENCE_WAIT;
gem_execbuf(fd, &execbuf);
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -1231,7 +1219,7 @@ static void test_syncobj_export(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
- struct local_gem_exec_fence fence = {
+ struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd),
};
int export[2];
@@ -1248,7 +1236,7 @@ static void test_syncobj_export(int fd)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
@@ -1256,7 +1244,7 @@ static void test_syncobj_export(int fd)
obj.handle = gem_create(fd, 4096);
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
- fence.flags = LOCAL_EXEC_FENCE_SIGNAL;
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
gem_execbuf(fd, &execbuf);
igt_assert(syncobj_busy(fd, fence.handle));
@@ -1291,7 +1279,7 @@ static void test_syncobj_repeat(int fd)
const unsigned nfences = 4096;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
- struct local_gem_exec_fence *fence;
+ struct drm_i915_gem_exec_fence *fence;
int export;
igt_spin_t *spin = igt_spin_new(fd);
@@ -1306,7 +1294,7 @@ static void test_syncobj_repeat(int fd)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(fence);
execbuf.num_cliprects = nfences;
@@ -1315,13 +1303,13 @@ static void test_syncobj_repeat(int fd)
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
for (int i = 0; i < nfences; i++)
- fence[i].flags = LOCAL_EXEC_FENCE_SIGNAL;
+ fence[i].flags = I915_EXEC_FENCE_SIGNAL;
gem_execbuf(fd, &execbuf);
for (int i = 0; i < nfences; i++) {
igt_assert(syncobj_busy(fd, fence[i].handle));
- fence[i].flags |= LOCAL_EXEC_FENCE_WAIT;
+ fence[i].flags |= I915_EXEC_FENCE_WAIT;
}
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -1359,7 +1347,7 @@ static void test_syncobj_import(int fd)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = LOCAL_EXEC_FENCE_OUT;
+ execbuf.flags = I915_EXEC_FENCE_OUT;
execbuf.rsvd2 = -1;
memset(&obj, 0, sizeof(obj));
@@ -1404,7 +1392,7 @@ static void test_syncobj_channel(int fd)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = LOCAL_EXEC_FENCE_OUT;
+ execbuf.flags = I915_EXEC_FENCE_OUT;
execbuf.rsvd2 = -1;
memset(&obj, 0, sizeof(obj));
@@ -1412,15 +1400,15 @@ static void test_syncobj_channel(int fd)
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
for (int i = 0; i < ARRAY_SIZE(syncobj); i++) {
- struct local_gem_exec_fence fence;
+ struct drm_i915_gem_exec_fence fence;
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
/* Create a primed fence */
fence.handle = syncobj_create(fd);
- fence.flags = LOCAL_EXEC_FENCE_SIGNAL;
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
gem_execbuf(fd, &execbuf);
@@ -1429,21 +1417,21 @@ static void test_syncobj_channel(int fd)
/* Two processes in ping-pong unison (pipe), one out of sync */
igt_fork(child, 1) {
- struct local_gem_exec_fence fence[3];
+ struct drm_i915_gem_exec_fence fence[3];
unsigned long count;
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(fence);
execbuf.num_cliprects = 3;
fence[0].handle = syncobj[0];
- fence[0].flags = LOCAL_EXEC_FENCE_SIGNAL;
+ fence[0].flags = I915_EXEC_FENCE_SIGNAL;
fence[1].handle = syncobj[1];
- fence[1].flags = LOCAL_EXEC_FENCE_WAIT;
+ fence[1].flags = I915_EXEC_FENCE_WAIT;
fence[2].handle = syncobj[2];
- fence[2].flags = LOCAL_EXEC_FENCE_WAIT;
+ fence[2].flags = I915_EXEC_FENCE_WAIT;
count = 0;
while (!*(volatile unsigned *)control) {
@@ -1454,21 +1442,21 @@ static void test_syncobj_channel(int fd)
control[1] = count;
}
igt_fork(child, 1) {
- struct local_gem_exec_fence fence[3];
+ struct drm_i915_gem_exec_fence fence[3];
unsigned long count;
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(fence);
execbuf.num_cliprects = 3;
fence[0].handle = syncobj[0];
- fence[0].flags = LOCAL_EXEC_FENCE_WAIT;
+ fence[0].flags = I915_EXEC_FENCE_WAIT;
fence[1].handle = syncobj[1];
- fence[1].flags = LOCAL_EXEC_FENCE_SIGNAL;
+ fence[1].flags = I915_EXEC_FENCE_SIGNAL;
fence[2].handle = syncobj[2];
- fence[2].flags = LOCAL_EXEC_FENCE_WAIT;
+ fence[2].flags = I915_EXEC_FENCE_WAIT;
count = 0;
while (!*(volatile unsigned *)control) {
@@ -1478,15 +1466,15 @@ static void test_syncobj_channel(int fd)
control[2] = count;
}
igt_fork(child, 1) {
- struct local_gem_exec_fence fence;
+ struct drm_i915_gem_exec_fence fence;
unsigned long count;
- execbuf.flags = LOCAL_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
fence.handle = syncobj[2];
- fence.flags = LOCAL_EXEC_FENCE_SIGNAL;
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
count = 0;
while (!*(volatile unsigned *)control) {
--
2.22.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 21+ messages in thread* [igt-dev] [PATCH i-g-t 09/10] tests/i915/exec_fence: reuse syncobj helpers
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
` (7 preceding siblings ...)
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 08/10] tests/i915/exec_fence: switch to internal headers Lionel Landwerlin
@ 2019-07-25 10:30 ` Lionel Landwerlin
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 10/10] tests/i915/exec_fence: add timeline fence tests Lionel Landwerlin
` (3 subsequent siblings)
12 siblings, 0 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:30 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
tests/i915/gem_exec_fence.c | 181 ++++--------------------------------
1 file changed, 19 insertions(+), 162 deletions(-)
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index f8c0da7c..d73477f4 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -22,6 +22,7 @@
*/
#include "igt.h"
+#include "igt_syncobj.h"
#include "igt_sysfs.h"
#include "igt_vgem.h"
#include "sw_sync.h"
@@ -827,168 +828,24 @@ static void test_invalid_fence_array(int fd)
munmap(ptr, 4096);
}
-static uint32_t __syncobj_create(int fd)
-{
- struct local_syncobj_create {
- uint32_t handle, flags;
- } arg;
-#define LOCAL_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct local_syncobj_create)
-
- memset(&arg, 0, sizeof(arg));
- igt_ioctl(fd, LOCAL_IOCTL_SYNCOBJ_CREATE, &arg);
-
- return arg.handle;
-}
-
-static uint32_t syncobj_create(int fd)
-{
- uint32_t ret;
-
- igt_assert_neq((ret = __syncobj_create(fd)), 0);
-
- return ret;
-}
-
-static int __syncobj_destroy(int fd, uint32_t handle)
-{
- struct local_syncobj_destroy {
- uint32_t handle, flags;
- } arg;
-#define LOCAL_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct local_syncobj_destroy)
- int err = 0;
-
- memset(&arg, 0, sizeof(arg));
- arg.handle = handle;
- if (igt_ioctl(fd, LOCAL_IOCTL_SYNCOBJ_DESTROY, &arg))
- err = -errno;
-
- errno = 0;
- return err;
-}
-
-static void syncobj_destroy(int fd, uint32_t handle)
-{
- igt_assert_eq(__syncobj_destroy(fd, handle), 0);
-}
-
static int __syncobj_to_sync_file(int fd, uint32_t handle)
{
- struct local_syncobj_handle {
- uint32_t handle;
- uint32_t flags;
- int32_t fd;
- uint32_t pad;
- } arg;
-#define LOCAL_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct local_syncobj_handle)
-
- memset(&arg, 0, sizeof(arg));
- arg.handle = handle;
- arg.flags = 1 << 0; /* EXPORT_SYNC_FILE */
- if (igt_ioctl(fd, LOCAL_IOCTL_SYNCOBJ_HANDLE_TO_FD, &arg))
- arg.fd = -errno;
-
- errno = 0;
- return arg.fd;
-}
-
-static int syncobj_to_sync_file(int fd, uint32_t handle)
-{
- int ret;
-
- igt_assert_lte(0, (ret = __syncobj_to_sync_file(fd, handle)));
-
- return ret;
-}
-
-static int __syncobj_from_sync_file(int fd, uint32_t handle, int sf)
-{
- struct local_syncobj_handle {
- uint32_t handle;
- uint32_t flags;
- int32_t fd;
- uint32_t pad;
- } arg;
-#define LOCAL_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct local_syncobj_handle)
- int err = 0;
-
- memset(&arg, 0, sizeof(arg));
- arg.handle = handle;
- arg.fd = sf;
- arg.flags = 1 << 0; /* IMPORT_SYNC_FILE */
- if (igt_ioctl(fd, LOCAL_IOCTL_SYNCOBJ_FD_TO_HANDLE, &arg))
- err = -errno;
-
- errno = 0;
- return err;
-}
-
-static void syncobj_from_sync_file(int fd, uint32_t handle, int sf)
-{
- igt_assert_eq(__syncobj_from_sync_file(fd, handle, sf), 0);
-}
-
-static int __syncobj_export(int fd, uint32_t handle, int *syncobj)
-{
- struct local_syncobj_handle {
- uint32_t handle;
- uint32_t flags;
- int32_t fd;
- uint32_t pad;
- } arg;
- int err;
-
- memset(&arg, 0, sizeof(arg));
- arg.handle = handle;
-
- err = 0;
- if (igt_ioctl(fd, LOCAL_IOCTL_SYNCOBJ_HANDLE_TO_FD, &arg))
- err = -errno;
+ struct drm_syncobj_handle arg = {
+ .handle = handle,
+ .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
+ };
- errno = 0;
- *syncobj = arg.fd;
- return err;
+ return __syncobj_handle_to_fd(fd, &arg);
}
static int syncobj_export(int fd, uint32_t handle)
{
- int syncobj;
-
- igt_assert_eq(__syncobj_export(fd, handle, &syncobj), 0);
-
- return syncobj;
-}
-
-static int __syncobj_import(int fd, int syncobj, uint32_t *handle)
-{
- struct local_syncobj_handle {
- uint32_t handle;
- uint32_t flags;
- int32_t fd;
- uint32_t pad;
- } arg;
-#define LOCAL_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct local_syncobj_handle)
- int err;
-
- memset(&arg, 0, sizeof(arg));
- arg.fd = syncobj;
-
- err = 0;
- if (igt_ioctl(fd, LOCAL_IOCTL_SYNCOBJ_FD_TO_HANDLE, &arg))
- err = -errno;
-
- errno = 0;
- *handle = arg.handle;
- return err;
+ return syncobj_handle_to_fd(fd, handle, 0);
}
static uint32_t syncobj_import(int fd, int syncobj)
{
- uint32_t handle;
-
- igt_assert_eq(__syncobj_import(fd, syncobj, &handle), 0);
-
-
- return handle;
+ return syncobj_fd_to_handle(fd, syncobj, 0);
}
static bool syncobj_busy(int fd, uint32_t handle)
@@ -996,7 +853,7 @@ static bool syncobj_busy(int fd, uint32_t handle)
bool result;
int sf;
- sf = syncobj_to_sync_file(fd, handle);
+ sf = syncobj_handle_to_fd(fd, handle, 0);
result = poll(&(struct pollfd){sf, POLLIN}, 1, 0) == 0;
close(sf);
@@ -1009,7 +866,7 @@ static void test_syncobj_unused_fence(int fd)
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_fence fence = {
- .handle = syncobj_create(fd),
+ .handle = syncobj_create(fd, 0),
};
igt_spin_t *spin = igt_spin_new(fd);
@@ -1045,7 +902,7 @@ static void test_syncobj_invalid_wait(int fd)
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_fence fence = {
- .handle = syncobj_create(fd),
+ .handle = syncobj_create(fd, 0),
};
memset(&execbuf, 0, sizeof(execbuf));
@@ -1073,7 +930,7 @@ static void test_syncobj_invalid_flags(int fd)
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_fence fence = {
- .handle = syncobj_create(fd),
+ .handle = syncobj_create(fd, 0),
};
memset(&execbuf, 0, sizeof(execbuf));
@@ -1101,7 +958,7 @@ static void test_syncobj_signal(int fd)
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_fence fence = {
- .handle = syncobj_create(fd),
+ .handle = syncobj_create(fd, 0),
};
igt_spin_t *spin = igt_spin_new(fd);
@@ -1140,7 +997,7 @@ static void test_syncobj_wait(int fd)
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_fence fence = {
- .handle = syncobj_create(fd),
+ .handle = syncobj_create(fd, 0),
};
igt_spin_t *spin;
unsigned engine;
@@ -1220,7 +1077,7 @@ static void test_syncobj_export(int fd)
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_fence fence = {
- .handle = syncobj_create(fd),
+ .handle = syncobj_create(fd, 0),
};
int export[2];
igt_spin_t *spin = igt_spin_new(fd);
@@ -1285,7 +1142,7 @@ static void test_syncobj_repeat(int fd)
/* Check that we can wait on the same fence multiple times */
fence = calloc(nfences, sizeof(*fence));
- fence->handle = syncobj_create(fd);
+ fence->handle = syncobj_create(fd, 0);
export = syncobj_export(fd, fence->handle);
for (int i = 1; i < nfences; i++)
fence[i].handle = syncobj_import(fd, export);
@@ -1337,7 +1194,7 @@ static void test_syncobj_import(int fd)
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_execbuffer2 execbuf;
igt_spin_t *spin = igt_spin_new(fd);
- uint32_t sync = syncobj_create(fd);
+ uint32_t sync = syncobj_create(fd, 0);
int fence;
/* Check that we can create a syncobj from an explicit fence (which
@@ -1358,7 +1215,7 @@ static void test_syncobj_import(int fd)
fence = execbuf.rsvd2 >> 32;
igt_assert(fence_busy(fence));
- syncobj_from_sync_file(fd, sync, fence);
+ syncobj_import_sync_file(fd, sync, fence);
close(fence);
igt_assert(gem_bo_busy(fd, obj.handle));
@@ -1407,7 +1264,7 @@ static void test_syncobj_channel(int fd)
execbuf.num_cliprects = 1;
/* Create a primed fence */
- fence.handle = syncobj_create(fd);
+ fence.handle = syncobj_create(fd, 0);
fence.flags = I915_EXEC_FENCE_SIGNAL;
gem_execbuf(fd, &execbuf);
--
2.22.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 21+ messages in thread* [igt-dev] [PATCH i-g-t 10/10] tests/i915/exec_fence: add timeline fence tests
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
` (8 preceding siblings ...)
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 09/10] tests/i915/exec_fence: reuse syncobj helpers Lionel Landwerlin
@ 2019-07-25 10:30 ` Lionel Landwerlin
2019-07-31 20:33 ` Chris Wilson
2019-07-31 20:35 ` Chris Wilson
2019-07-25 11:18 ` [igt-dev] ✗ GitLab.Pipeline: warning for Vulkan performance queries support & others Patchwork
` (2 subsequent siblings)
12 siblings, 2 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:30 UTC (permalink / raw)
To: igt-dev
We can now give a tuple (handle, point_value) for timeline semaphores.
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
tests/i915/gem_exec_fence.c | 581 ++++++++++++++++++++++++++++++++++++
1 file changed, 581 insertions(+)
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index d73477f4..bd857bb6 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -1356,6 +1356,551 @@ static void test_syncobj_channel(int fd)
syncobj_destroy(fd, syncobj[i]);
}
+static bool has_syncobj_timeline(int fd)
+{
+ struct drm_get_cap cap = { .capability = DRM_CAP_SYNCOBJ_TIMELINE };
+ ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
+ return cap.value;
+}
+
+static bool exec_has_timeline_fences(int fd)
+{
+ struct drm_i915_getparam gp;
+ int value = 0;
+
+ memset(&gp, 0, sizeof(gp));
+ gp.param = I915_PARAM_HAS_EXEC_TIMELINE_FENCES;
+ gp.value = &value;
+
+ ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
+ errno = 0;
+
+ return value;
+}
+
+static void test_invalid_timeline_fence_array(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 obj;
+ struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
+ struct drm_i915_gem_exec_fence fence;
+ uint64_t value;
+ void *ptr;
+
+ /* create an otherwise valid execbuf */
+ memset(&obj, 0, sizeof(obj));
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+ gem_execbuf(fd, &execbuf);
+
+ /* Invalid num_cliprects value */
+ execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
+ execbuf.num_cliprects = 1;
+ execbuf.flags = I915_EXEC_EXT;
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+
+ fence.handle = syncobj_create(fd, 0);
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ value = 1;
+
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = to_user_pointer(&value);
+
+ /* Invalid fence array & i915 ext */
+ execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
+ execbuf.num_cliprects = 0;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY | I915_EXEC_EXT;
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+
+ syncobj_create(fd, fence.handle);
+
+ execbuf.flags = I915_EXEC_EXT;
+
+ /* Invalid handles_ptr */
+ value = 1;
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = -1;
+ timeline_fences.values_ptr = to_user_pointer(&value);
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EFAULT);
+
+ /* Invalid values_ptr */
+ value = 1;
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = -1;
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EFAULT);
+
+ /* Invalid syncobj handle */
+ memset(&fence, 0, sizeof(fence));
+ fence.handle = 0;
+ fence.flags = I915_EXEC_FENCE_WAIT;
+ value = 1;
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = to_user_pointer(&value);
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
+
+ /* Invalid syncobj timeline point */
+ memset(&fence, 0, sizeof(fence));
+ fence.handle = syncobj_create(fd, 0);
+ fence.flags = I915_EXEC_FENCE_WAIT;
+ value = 1;
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = to_user_pointer(&value);
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
+ syncobj_destroy(fd, fence.handle);
+
+ /* Invalid handles_ptr */
+ ptr = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+ igt_assert(ptr != MAP_FAILED);
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(ptr);
+ timeline_fences.values_ptr = to_user_pointer(&value);
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
+
+ do_or_die(mprotect(ptr, 4096, PROT_READ));
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
+
+ do_or_die(mprotect(ptr, 4096, PROT_NONE));
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EFAULT);
+
+ munmap(ptr, 4096);
+
+ /* Invalid values_ptr */
+ ptr = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+ igt_assert(ptr != MAP_FAILED);
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = to_user_pointer(ptr);
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
+
+ do_or_die(mprotect(ptr, 4096, PROT_READ));
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
+
+ do_or_die(mprotect(ptr, 4096, PROT_NONE));
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EFAULT);
+
+ munmap(ptr, 4096);
+}
+
+static void test_syncobj_timeline_unused_fence(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(fd, 0),
+ };
+ igt_spin_t *spin = igt_spin_new(fd);
+ uint64_t value = 1;
+
+ /* sanity check our syncobj_to_sync_file interface */
+ igt_assert_eq(__syncobj_to_sync_file(fd, 0), -ENOENT);
+
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = to_user_pointer(&value);
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+ execbuf.flags = I915_EXEC_EXT;
+ execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
+ execbuf.num_cliprects = 0;
+
+ memset(&obj, 0, sizeof(obj));
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ gem_execbuf(fd, &execbuf);
+
+ /* no flags, the fence isn't created */
+ igt_assert_eq(__syncobj_to_sync_file(fd, fence.handle), -EINVAL);
+ igt_assert(gem_bo_busy(fd, obj.handle));
+
+ gem_close(fd, obj.handle);
+ syncobj_destroy(fd, fence.handle);
+
+ igt_spin_free(fd, spin);
+}
+
+static void test_syncobj_timeline_invalid_wait(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(fd, 0),
+ };
+ uint64_t value = 1;
+
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = to_user_pointer(&value);
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+ execbuf.flags = I915_EXEC_EXT;
+ execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
+ execbuf.num_cliprects = 0;
+
+ memset(&obj, 0, sizeof(obj));
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ /* waiting before the fence point 1 is set is invalid */
+ fence.flags = I915_EXEC_FENCE_WAIT;
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+
+ /* Now set point 1. */
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ gem_execbuf(fd, &execbuf);
+
+ /* waiting before the fence point 2 is set is invalid */
+ value = 2;
+ fence.flags = I915_EXEC_FENCE_WAIT;
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+
+ gem_close(fd, obj.handle);
+ syncobj_destroy(fd, fence.handle);
+}
+
+static void test_syncobj_timeline_invalid_flags(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(fd, 0),
+ };
+ uint64_t value = 1;
+
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = to_user_pointer(&value);
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+ execbuf.flags = I915_EXEC_EXT;
+ execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
+ execbuf.num_cliprects = 0;
+
+ memset(&obj, 0, sizeof(obj));
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ /* set all flags to hit an invalid one */
+ fence.flags = ~0;
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+
+ gem_close(fd, obj.handle);
+ syncobj_destroy(fd, fence.handle);
+}
+
+static void test_syncobj_timeline_signal(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(fd, 0),
+ };
+ uint64_t value = 1;
+ igt_spin_t *spin = igt_spin_new(fd);
+
+ /* Check that the syncobj is signaled only when our request/fence is */
+
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = to_user_pointer(&value);
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+ execbuf.flags = I915_EXEC_EXT;
+ execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
+ execbuf.num_cliprects = 0;
+
+ memset(&obj, 0, sizeof(obj));
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ gem_execbuf(fd, &execbuf);
+
+ igt_assert(gem_bo_busy(fd, obj.handle));
+ igt_assert(syncobj_busy(fd, fence.handle));
+
+ igt_spin_free(fd, spin);
+
+ gem_sync(fd, obj.handle);
+ igt_assert(!gem_bo_busy(fd, obj.handle));
+ igt_assert(!syncobj_busy(fd, fence.handle));
+
+ gem_close(fd, obj.handle);
+ syncobj_destroy(fd, fence.handle);
+}
+
+static void test_syncobj_timeline_wait(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(fd, 0),
+ };
+ uint64_t value = 1;
+ igt_spin_t *spin;
+ unsigned engine;
+ unsigned handle[16];
+ int n;
+
+ /* Check that we can use the syncobj to asynchronous wait prior to
+ * execution.
+ */
+
+ gem_quiescent_gpu(fd);
+
+ spin = igt_spin_new(fd);
+
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = to_user_pointer(&value);
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+
+ memset(&obj, 0, sizeof(obj));
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ /* Queue a signaler from the blocked engine */
+ execbuf.flags = I915_EXEC_EXT;
+ execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
+ execbuf.num_cliprects = 0;
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ gem_execbuf(fd, &execbuf);
+ igt_assert(gem_bo_busy(fd, spin->handle));
+
+ gem_close(fd, obj.handle);
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ n = 0;
+ for_each_engine(fd, engine) {
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ /* No inter-engine synchronisation, will complete */
+ if (engine == I915_EXEC_BLT) {
+ execbuf.flags = engine;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ gem_execbuf(fd, &execbuf);
+ gem_sync(fd, obj.handle);
+ igt_assert(gem_bo_busy(fd, spin->handle));
+ }
+ igt_assert(gem_bo_busy(fd, spin->handle));
+
+ /* Now wait upon the blocked engine */
+ execbuf.flags = I915_EXEC_EXT | engine;
+ execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
+ execbuf.num_cliprects = 0;
+ fence.flags = I915_EXEC_FENCE_WAIT;
+ gem_execbuf(fd, &execbuf);
+
+ igt_assert(gem_bo_busy(fd, obj.handle));
+ handle[n++] = obj.handle;
+ }
+ syncobj_destroy(fd, fence.handle);
+
+ for (int i = 0; i < n; i++)
+ igt_assert(gem_bo_busy(fd, handle[i]));
+
+ igt_spin_free(fd, spin);
+
+ for (int i = 0; i < n; i++) {
+ gem_sync(fd, handle[i]);
+ gem_close(fd, handle[i]);
+ }
+}
+
+static void test_syncobj_timeline_export(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(fd, 0),
+ };
+ uint64_t value = 1;
+ int export[2];
+ igt_spin_t *spin = igt_spin_new(fd);
+
+ /* Check that if we export the syncobj prior to use it picks up
+ * the later fence. This allows a syncobj to establish a channel
+ * between clients that may be updated to a later fence by either
+ * end.
+ */
+ for (int n = 0; n < ARRAY_SIZE(export); n++)
+ export[n] = syncobj_export(fd, fence.handle);
+
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(&fence);
+ timeline_fences.values_ptr = to_user_pointer(&value);
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+ execbuf.flags = I915_EXEC_EXT;
+ execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
+ execbuf.num_cliprects = 0;
+
+ memset(&obj, 0, sizeof(obj));
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ gem_execbuf(fd, &execbuf);
+
+ igt_assert(syncobj_busy(fd, fence.handle));
+ igt_assert(gem_bo_busy(fd, obj.handle));
+
+ for (int n = 0; n < ARRAY_SIZE(export); n++) {
+ uint32_t import = syncobj_import(fd, export[n]);
+ igt_assert(syncobj_busy(fd, import));
+ syncobj_destroy(fd, import);
+ }
+
+ igt_spin_free(fd, spin);
+
+ gem_sync(fd, obj.handle);
+ igt_assert(!gem_bo_busy(fd, obj.handle));
+ igt_assert(!syncobj_busy(fd, fence.handle));
+
+ gem_close(fd, obj.handle);
+ syncobj_destroy(fd, fence.handle);
+
+ for (int n = 0; n < ARRAY_SIZE(export); n++) {
+ uint32_t import = syncobj_import(fd, export[n]);
+ igt_assert(!syncobj_busy(fd, import));
+ syncobj_destroy(fd, import);
+ close(export[n]);
+ }
+}
+
+static void test_syncobj_timeline_repeat(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ const unsigned nfences = 4096;
+ struct drm_i915_gem_exec_object2 obj;
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
+ struct drm_i915_gem_exec_fence *fence;
+ uint64_t *values;
+ int export;
+ igt_spin_t *spin = igt_spin_new(fd);
+
+ /* Check that we can wait on the same fence multiple times */
+ fence = calloc(nfences, sizeof(*fence));
+ values = calloc(nfences, sizeof(*values));
+ fence->handle = syncobj_create(fd, 0);
+ values[0] = 1;
+ export = syncobj_export(fd, fence->handle);
+ for (int i = 1; i < nfences; i++) {
+ fence[i].handle = syncobj_import(fd, export);
+ values[i] = i + 1;
+ }
+ close(export);
+
+ memset(&timeline_fences, 0, sizeof(timeline_fences));
+ timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
+ timeline_fences.fence_count = 1;
+ timeline_fences.handles_ptr = to_user_pointer(fence);
+ timeline_fences.values_ptr = to_user_pointer(values);
+
+ memset(&execbuf, 0, sizeof(execbuf));
+ execbuf.buffers_ptr = to_user_pointer(&obj);
+ execbuf.buffer_count = 1;
+ execbuf.flags = I915_EXEC_EXT;
+ execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
+ execbuf.num_cliprects = 0;
+
+ memset(&obj, 0, sizeof(obj));
+ obj.handle = gem_create(fd, 4096);
+ gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+ for (int i = 0; i < nfences; i++)
+ fence[i].flags = I915_EXEC_FENCE_SIGNAL;
+
+ gem_execbuf(fd, &execbuf);
+
+ for (int i = 0; i < nfences; i++) {
+ igt_assert(syncobj_busy(fd, fence[i].handle));
+ fence[i].flags |= I915_EXEC_FENCE_WAIT;
+ }
+ igt_assert(gem_bo_busy(fd, obj.handle));
+
+ gem_execbuf(fd, &execbuf);
+
+ for (int i = 0; i < nfences; i++)
+ igt_assert(syncobj_busy(fd, fence[i].handle));
+ igt_assert(gem_bo_busy(fd, obj.handle));
+
+ igt_spin_free(fd, spin);
+
+ gem_sync(fd, obj.handle);
+ gem_close(fd, obj.handle);
+
+ for (int i = 0; i < nfences; i++) {
+ igt_assert(!syncobj_busy(fd, fence[i].handle));
+ syncobj_destroy(fd, fence[i].handle);
+ }
+ free(fence);
+ free(values);
+}
+
igt_main
{
const struct intel_execution_engine *e;
@@ -1523,6 +2068,42 @@ igt_main
}
}
+ igt_subtest_group { /* syncobj timeline */
+ igt_fixture {
+ igt_require(exec_has_timeline_fences(i915));
+ igt_assert(has_syncobj_timeline(i915));
+ igt_fork_hang_detector(i915);
+ }
+
+ igt_subtest("invalid-timeline-fence-array")
+ test_invalid_timeline_fence_array(i915);
+
+ igt_subtest("syncobj-timeline-unused-fence")
+ test_syncobj_timeline_unused_fence(i915);
+
+ igt_subtest("syncobj-timeline-invalid-wait")
+ test_syncobj_timeline_invalid_wait(i915);
+
+ igt_subtest("syncobj-timeline-invalid-flags")
+ test_syncobj_timeline_invalid_flags(i915);
+
+ igt_subtest("syncobj-timeline-signal")
+ test_syncobj_timeline_signal(i915);
+
+ igt_subtest("syncobj-timeline-wait")
+ test_syncobj_timeline_wait(i915);
+
+ igt_subtest("syncobj-timeline-export")
+ test_syncobj_timeline_export(i915);
+
+ igt_subtest("syncobj-timeline-repeat")
+ test_syncobj_timeline_repeat(i915);
+
+ igt_fixture {
+ igt_stop_hang_detector();
+ }
+ }
+
igt_fixture {
close(i915);
}
--
2.22.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 21+ messages in thread* Re: [igt-dev] [PATCH i-g-t 10/10] tests/i915/exec_fence: add timeline fence tests
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 10/10] tests/i915/exec_fence: add timeline fence tests Lionel Landwerlin
@ 2019-07-31 20:33 ` Chris Wilson
2019-08-02 9:03 ` Lionel Landwerlin
2019-07-31 20:35 ` Chris Wilson
1 sibling, 1 reply; 21+ messages in thread
From: Chris Wilson @ 2019-07-31 20:33 UTC (permalink / raw)
To: Lionel Landwerlin, igt-dev
Quoting Lionel Landwerlin (2019-07-25 11:30:34)
> +static void test_syncobj_timeline_wait(int fd)
> +{
> + const uint32_t bbe = MI_BATCH_BUFFER_END;
> + struct drm_i915_gem_exec_object2 obj;
> + struct drm_i915_gem_execbuffer2 execbuf;
> + struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
> + struct drm_i915_gem_exec_fence fence = {
> + .handle = syncobj_create(fd, 0),
> + };
> + uint64_t value = 1;
> + igt_spin_t *spin;
> + unsigned engine;
> + unsigned handle[16];
> + int n;
> +
> + /* Check that we can use the syncobj to asynchronous wait prior to
> + * execution.
> + */
> +
> + gem_quiescent_gpu(fd);
> +
> + spin = igt_spin_new(fd);
> +
> + memset(&timeline_fences, 0, sizeof(timeline_fences));
> + timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
> + timeline_fences.fence_count = 1;
> + timeline_fences.handles_ptr = to_user_pointer(&fence);
> + timeline_fences.values_ptr = to_user_pointer(&value);
> +
> + memset(&execbuf, 0, sizeof(execbuf));
> + execbuf.buffers_ptr = to_user_pointer(&obj);
> + execbuf.buffer_count = 1;
> +
> + memset(&obj, 0, sizeof(obj));
> + obj.handle = gem_create(fd, 4096);
> + gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
> +
> + /* Queue a signaler from the blocked engine */
> + execbuf.flags = I915_EXEC_EXT;
> + execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
> + execbuf.num_cliprects = 0;
> + fence.flags = I915_EXEC_FENCE_SIGNAL;
> + gem_execbuf(fd, &execbuf);
> + igt_assert(gem_bo_busy(fd, spin->handle));
> +
> + gem_close(fd, obj.handle);
> + obj.handle = gem_create(fd, 4096);
> + gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
This handle is leaked. Instead of recreating a handle every time, you
could export a sync-file.
> + n = 0;
> + for_each_engine(fd, engine) {
> + obj.handle = gem_create(fd, 4096);
> + gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
> +
> + /* No inter-engine synchronisation, will complete */
> + if (engine == I915_EXEC_BLT) {
> + execbuf.flags = engine;
> + execbuf.cliprects_ptr = 0;
> + execbuf.num_cliprects = 0;
> + gem_execbuf(fd, &execbuf);
> + gem_sync(fd, obj.handle);
> + igt_assert(gem_bo_busy(fd, spin->handle));
> + }
> + igt_assert(gem_bo_busy(fd, spin->handle));
> +
> + /* Now wait upon the blocked engine */
> + execbuf.flags = I915_EXEC_EXT | engine;
> + execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
> + execbuf.num_cliprects = 0;
> + fence.flags = I915_EXEC_FENCE_WAIT;
> + gem_execbuf(fd, &execbuf);
> +
> + igt_assert(gem_bo_busy(fd, obj.handle));
> + handle[n++] = obj.handle;
> + }
You could move this to a second context and avoid the differentiation
between engines:
fence.flags = I915_EXEC_FENCE_WAIT;
execbuf.rsvd1 = gem_context_create(fd);
n = 0;
for_each_engine(fd, engine) {
obj.handle = gem_create(fd, 4096);
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
/* Unsynchronised will execute ahead of the blockage */
execbuf.flags = engine;
execbuf.cliprects_ptr = 0;
gem_execbuf(fd, &execbuf);
gem_sync(fd, obj.handle);
/* Now wait upon the blocked engine */
execbuf.flags |= I915_EXEC_EXT;
execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
gem_execbuf(fd, &execbuf);
handle[n++] = obj.handle;
}
igt_assert(gem_bo_busy(fd, spin->handle));
gem_context_destroy(fd, execbuf.rsvd1);
> + syncobj_destroy(fd, fence.handle);
> +
> + for (int i = 0; i < n; i++)
> + igt_assert(gem_bo_busy(fd, handle[i]));
> +
> + igt_spin_free(fd, spin);
> +
> + for (int i = 0; i < n; i++) {
> + gem_sync(fd, handle[i]);
> + gem_close(fd, handle[i]);
> + }
> +}
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 21+ messages in thread* Re: [igt-dev] [PATCH i-g-t 10/10] tests/i915/exec_fence: add timeline fence tests
2019-07-31 20:33 ` Chris Wilson
@ 2019-08-02 9:03 ` Lionel Landwerlin
2019-08-02 9:11 ` Chris Wilson
0 siblings, 1 reply; 21+ messages in thread
From: Lionel Landwerlin @ 2019-08-02 9:03 UTC (permalink / raw)
To: Chris Wilson, igt-dev
On 31/07/2019 23:33, Chris Wilson wrote:
> Quoting Lionel Landwerlin (2019-07-25 11:30:34)
>> +static void test_syncobj_timeline_wait(int fd)
>> +{
>> + const uint32_t bbe = MI_BATCH_BUFFER_END;
>> + struct drm_i915_gem_exec_object2 obj;
>> + struct drm_i915_gem_execbuffer2 execbuf;
>> + struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
>> + struct drm_i915_gem_exec_fence fence = {
>> + .handle = syncobj_create(fd, 0),
>> + };
>> + uint64_t value = 1;
>> + igt_spin_t *spin;
>> + unsigned engine;
>> + unsigned handle[16];
>> + int n;
>> +
>> + /* Check that we can use the syncobj to asynchronous wait prior to
>> + * execution.
>> + */
>> +
>> + gem_quiescent_gpu(fd);
>> +
>> + spin = igt_spin_new(fd);
>> +
>> + memset(&timeline_fences, 0, sizeof(timeline_fences));
>> + timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
>> + timeline_fences.fence_count = 1;
>> + timeline_fences.handles_ptr = to_user_pointer(&fence);
>> + timeline_fences.values_ptr = to_user_pointer(&value);
>> +
>> + memset(&execbuf, 0, sizeof(execbuf));
>> + execbuf.buffers_ptr = to_user_pointer(&obj);
>> + execbuf.buffer_count = 1;
>> +
>> + memset(&obj, 0, sizeof(obj));
>> + obj.handle = gem_create(fd, 4096);
>> + gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
>> +
>> + /* Queue a signaler from the blocked engine */
>> + execbuf.flags = I915_EXEC_EXT;
>> + execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
>> + execbuf.num_cliprects = 0;
>> + fence.flags = I915_EXEC_FENCE_SIGNAL;
>> + gem_execbuf(fd, &execbuf);
>> + igt_assert(gem_bo_busy(fd, spin->handle));
>> +
>> + gem_close(fd, obj.handle);
>> + obj.handle = gem_create(fd, 4096);
>> + gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
> This handle is leaked. Instead of recreating a handle every time, you
> could export a sync-file.
>
>> + n = 0;
>> + for_each_engine(fd, engine) {
>> + obj.handle = gem_create(fd, 4096);
>> + gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
>> +
>> + /* No inter-engine synchronisation, will complete */
>> + if (engine == I915_EXEC_BLT) {
>> + execbuf.flags = engine;
>> + execbuf.cliprects_ptr = 0;
>> + execbuf.num_cliprects = 0;
>> + gem_execbuf(fd, &execbuf);
>> + gem_sync(fd, obj.handle);
>> + igt_assert(gem_bo_busy(fd, spin->handle));
>> + }
>> + igt_assert(gem_bo_busy(fd, spin->handle));
>> +
>> + /* Now wait upon the blocked engine */
>> + execbuf.flags = I915_EXEC_EXT | engine;
>> + execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
>> + execbuf.num_cliprects = 0;
>> + fence.flags = I915_EXEC_FENCE_WAIT;
>> + gem_execbuf(fd, &execbuf);
>> +
>> + igt_assert(gem_bo_busy(fd, obj.handle));
>> + handle[n++] = obj.handle;
>> + }
> You could move this to a second context and avoid the differentiation
> between engines:
>
> fence.flags = I915_EXEC_FENCE_WAIT;
> execbuf.rsvd1 = gem_context_create(fd);
>
> n = 0;
> for_each_engine(fd, engine) {
> obj.handle = gem_create(fd, 4096);
> gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
>
> /* Unsynchronised will execute ahead of the blockage */
> execbuf.flags = engine;
> execbuf.cliprects_ptr = 0;
> gem_execbuf(fd, &execbuf);
> gem_sync(fd, obj.handle);
Not quite sure this ends up blocking on engine=1.
>
> /* Now wait upon the blocked engine */
> execbuf.flags |= I915_EXEC_EXT;
> execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
> gem_execbuf(fd, &execbuf);
> handle[n++] = obj.handle;
> }
> igt_assert(gem_bo_busy(fd, spin->handle));
> gem_context_destroy(fd, execbuf.rsvd1);
>
>> + syncobj_destroy(fd, fence.handle);
>> +
>> + for (int i = 0; i < n; i++)
>> + igt_assert(gem_bo_busy(fd, handle[i]));
>> +
>> + igt_spin_free(fd, spin);
>> +
>> + for (int i = 0; i < n; i++) {
>> + gem_sync(fd, handle[i]);
>> + gem_close(fd, handle[i]);
>> + }
>> +}
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 21+ messages in thread* Re: [igt-dev] [PATCH i-g-t 10/10] tests/i915/exec_fence: add timeline fence tests
2019-08-02 9:03 ` Lionel Landwerlin
@ 2019-08-02 9:11 ` Chris Wilson
2019-08-02 9:47 ` Lionel Landwerlin
0 siblings, 1 reply; 21+ messages in thread
From: Chris Wilson @ 2019-08-02 9:11 UTC (permalink / raw)
To: Lionel Landwerlin, igt-dev
Quoting Lionel Landwerlin (2019-08-02 10:03:59)
> On 31/07/2019 23:33, Chris Wilson wrote:
> > Quoting Lionel Landwerlin (2019-07-25 11:30:34)
> > You could move this to a second context and avoid the differentiation
> > between engines:
> >
> > fence.flags = I915_EXEC_FENCE_WAIT;
> > execbuf.rsvd1 = gem_context_create(fd);
> >
> > n = 0;
> > for_each_engine(fd, engine) {
> > obj.handle = gem_create(fd, 4096);
> > gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
> >
> > /* Unsynchronised will execute ahead of the blockage */
> > execbuf.flags = engine;
> > execbuf.cliprects_ptr = 0;
> > gem_execbuf(fd, &execbuf);
> > gem_sync(fd, obj.handle);
>
>
> Not quite sure this ends up blocking on engine=1.
It's a different context, so it's allowed to execute ahead of the spinner
when the spinner exceeds its timeslice -- so long as preemption is
enabled on the spinner. If not, that's a kernel bug.
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 21+ messages in thread* Re: [igt-dev] [PATCH i-g-t 10/10] tests/i915/exec_fence: add timeline fence tests
2019-08-02 9:11 ` Chris Wilson
@ 2019-08-02 9:47 ` Lionel Landwerlin
0 siblings, 0 replies; 21+ messages in thread
From: Lionel Landwerlin @ 2019-08-02 9:47 UTC (permalink / raw)
To: Chris Wilson, igt-dev
On 02/08/2019 12:11, Chris Wilson wrote:
> Quoting Lionel Landwerlin (2019-08-02 10:03:59)
>> On 31/07/2019 23:33, Chris Wilson wrote:
>>> Quoting Lionel Landwerlin (2019-07-25 11:30:34)
>>> You could move this to a second context and avoid the differentiation
>>> between engines:
>>>
>>> fence.flags = I915_EXEC_FENCE_WAIT;
>>> execbuf.rsvd1 = gem_context_create(fd);
>>>
>>> n = 0;
>>> for_each_engine(fd, engine) {
>>> obj.handle = gem_create(fd, 4096);
>>> gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
>>>
>>> /* Unsynchronised will execute ahead of the blockage */
>>> execbuf.flags = engine;
>>> execbuf.cliprects_ptr = 0;
>>> gem_execbuf(fd, &execbuf);
>>> gem_sync(fd, obj.handle);
>>
>> Not quite sure this ends up blocking on engine=1.
> It's a different context, it's allowed to execute ahead of the spinner
> when the spinner exceeds its timeslice -- so long as preemption is
> enabled on the spinner. If not, that's kernel bug.
> -Chris
>
Hmm... modifying the existing test_syncobj_wait() to do the same leads
to a deadlock.
That's on a stable 5.0 kernel, so that appears unrelated to my timeline
semaphore changes.
-Lionel
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [igt-dev] [PATCH i-g-t 10/10] tests/i915/exec_fence: add timeline fence tests
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 10/10] tests/i915/exec_fence: add timeline fence tests Lionel Landwerlin
2019-07-31 20:33 ` Chris Wilson
@ 2019-07-31 20:35 ` Chris Wilson
1 sibling, 0 replies; 21+ messages in thread
From: Chris Wilson @ 2019-07-31 20:35 UTC (permalink / raw)
To: Lionel Landwerlin, igt-dev
Quoting Lionel Landwerlin (2019-07-25 11:30:34)
> We can now give a tuple (handle, point_value) for timeline semaphores.
What I didn't notice was a test for passing value = 0 into the timeline
semaphores and checking that it degraded into a normal syncobj (and
defining the behavior of value=0 with a live timeline, which afaict
means wait-on-last or replace-on-signal).
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 21+ messages in thread
* [igt-dev] ✗ GitLab.Pipeline: warning for Vulkan performance queries support & others
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
` (9 preceding siblings ...)
2019-07-25 10:30 ` [igt-dev] [PATCH i-g-t 10/10] tests/i915/exec_fence: add timeline fence tests Lionel Landwerlin
@ 2019-07-25 11:18 ` Patchwork
2019-07-25 11:19 ` [igt-dev] ✓ Fi.CI.BAT: success " Patchwork
2019-07-25 16:37 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
12 siblings, 0 replies; 21+ messages in thread
From: Patchwork @ 2019-07-25 11:18 UTC (permalink / raw)
To: Lionel Landwerlin; +Cc: igt-dev
== Series Details ==
Series: Vulkan performance queries support & others
URL : https://patchwork.freedesktop.org/series/64220/
State : warning
== Summary ==
Pipeline status: FAILED.
See https://gitlab.freedesktop.org/gfx-ci/igt-ci-tags/pipelines/51185 for more details.
== Logs ==
For more details see: https://gitlab.freedesktop.org/gfx-ci/igt-ci-tags/pipelines/51185
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 21+ messages in thread* [igt-dev] ✓ Fi.CI.BAT: success for Vulkan performance queries support & others
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
` (10 preceding siblings ...)
2019-07-25 11:18 ` [igt-dev] ✗ GitLab.Pipeline: warning for Vulkan performance queries support & others Patchwork
@ 2019-07-25 11:19 ` Patchwork
2019-07-25 16:37 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
12 siblings, 0 replies; 21+ messages in thread
From: Patchwork @ 2019-07-25 11:19 UTC (permalink / raw)
To: Lionel Landwerlin; +Cc: igt-dev
== Series Details ==
Series: Vulkan performance queries support & others
URL : https://patchwork.freedesktop.org/series/64220/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_6549 -> IGTPW_3293
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://patchwork.freedesktop.org/api/1.0/series/64220/revisions/1/mbox/
Known issues
------------
Here are the changes found in IGTPW_3293 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gem_render_tiled_blits@basic:
- fi-icl-u3: [PASS][1] -> [DMESG-WARN][2] ([fdo#107724])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/fi-icl-u3/igt@gem_render_tiled_blits@basic.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/fi-icl-u3/igt@gem_render_tiled_blits@basic.html
* igt@i915_selftest@live_hangcheck:
- fi-kbl-guc: [PASS][3] -> [INCOMPLETE][4] ([fdo#108744])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/fi-kbl-guc/igt@i915_selftest@live_hangcheck.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/fi-kbl-guc/igt@i915_selftest@live_hangcheck.html
* igt@kms_busy@basic-flip-a:
- fi-kbl-7567u: [PASS][5] -> [SKIP][6] ([fdo#109271] / [fdo#109278]) +2 similar issues
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/fi-kbl-7567u/igt@kms_busy@basic-flip-a.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/fi-kbl-7567u/igt@kms_busy@basic-flip-a.html
#### Possible fixes ####
* igt@gem_exec_suspend@basic-s3:
- fi-blb-e6850: [INCOMPLETE][7] ([fdo#107718]) -> [PASS][8]
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/fi-blb-e6850/igt@gem_exec_suspend@basic-s3.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/fi-blb-e6850/igt@gem_exec_suspend@basic-s3.html
* igt@i915_module_load@reload-with-fault-injection:
- fi-snb-2600: [INCOMPLETE][9] ([fdo#105411]) -> [PASS][10]
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/fi-snb-2600/igt@i915_module_load@reload-with-fault-injection.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/fi-snb-2600/igt@i915_module_load@reload-with-fault-injection.html
- fi-snb-2520m: [INCOMPLETE][11] -> [PASS][12]
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/fi-snb-2520m/igt@i915_module_load@reload-with-fault-injection.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/fi-snb-2520m/igt@i915_module_load@reload-with-fault-injection.html
* igt@kms_frontbuffer_tracking@basic:
- fi-icl-u2: [FAIL][13] ([fdo#103167]) -> [PASS][14]
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/fi-icl-u2/igt@kms_frontbuffer_tracking@basic.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/fi-icl-u2/igt@kms_frontbuffer_tracking@basic.html
[fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
[fdo#105411]: https://bugs.freedesktop.org/show_bug.cgi?id=105411
[fdo#107718]: https://bugs.freedesktop.org/show_bug.cgi?id=107718
[fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
[fdo#108744]: https://bugs.freedesktop.org/show_bug.cgi?id=108744
[fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
[fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
Participating hosts (54 -> 46)
------------------------------
Missing (8): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-icl-y fi-byt-clapper fi-bdw-samus
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_5110 -> IGTPW_3293
CI-20190529: 20190529
CI_DRM_6549: b3ff1a4436815b4baf26a73dccfa71527a2bfc4e @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_3293: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/
IGT_5110: 9085f552a11156c5b856593361b30606a9424c01 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
== Testlist changes ==
+++ 137 lines
--- 0 lines
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 21+ messages in thread* [igt-dev] ✗ Fi.CI.IGT: failure for Vulkan performance queries support & others
2019-07-25 10:30 [igt-dev] [PATCH i-g-t 00/10] Vulkan performance queries support & others Lionel Landwerlin
` (11 preceding siblings ...)
2019-07-25 11:19 ` [igt-dev] ✓ Fi.CI.BAT: success " Patchwork
@ 2019-07-25 16:37 ` Patchwork
12 siblings, 0 replies; 21+ messages in thread
From: Patchwork @ 2019-07-25 16:37 UTC (permalink / raw)
To: Lionel Landwerlin; +Cc: igt-dev
== Series Details ==
Series: Vulkan performance queries support & others
URL : https://patchwork.freedesktop.org/series/64220/
State : failure
== Summary ==
CI Bug Log - changes from CI_DRM_6549_full -> IGTPW_3293_full
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with IGTPW_3293_full absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in IGTPW_3293_full, please notify your bug team to allow them
to document this new failure mode, which will reduce false positives in CI.
External URL: https://patchwork.freedesktop.org/api/1.0/series/64220/revisions/1/mbox/
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in IGTPW_3293_full:
### IGT changes ###
#### Possible regressions ####
* {igt@syncobj_timeline@wait-any-interrupted} (NEW):
- shard-iclb: NOTRUN -> [SKIP][1] +128 similar issues
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-iclb3/igt@syncobj_timeline@wait-any-interrupted.html
New tests
---------
New tests have been introduced between CI_DRM_6549_full and IGTPW_3293_full:
### New IGT tests (129) ###
* igt@i915_query@query-perf-config-data-invalid:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@i915_query@query-perf-config-list-invalid:
- Statuses : 5 skip(s)
- Exec time: [0.0] s
* igt@i915_query@query-perf-configs:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@perf@exec-perf:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@perf@exec-perf-invalid:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@perf@invalid-disabled-preemption:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@perf@single-ctx-counters-disabled-preemption:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@perf@unprivileged-single-ctx-counters-disabled-preemption:
- Statuses : 5 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@32bits-limit:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@device-signal-unordered:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@device-submit-unordered:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-for-submit-available-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-for-submit-available-unsubmitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-for-submit-available-unsubmitted-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-for-submit-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-for-submit-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-for-submit-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-for-submit-unsubmitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-for-submit-unsubmitted-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-for-submit-unsubmitted-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-all-submitted-signaled:
- Statuses : 5 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-for-submit-available-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-for-submit-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-for-submit-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-for-submit-unsubmitted-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-multi-wait-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-single-wait-all-for-submit-available-unsubmitted:
- Statuses : 5 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-single-wait-all-for-submit-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-single-wait-all-for-submit-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-single-wait-all-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-single-wait-for-submit-available-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-single-wait-for-submit-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-single-wait-for-submit-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@etime-single-wait-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@host-signal-ordered:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@host-signal-points:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-all-available-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-all-available-unsubmitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-all-available-unsubmitted-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-all-available-unsubmitted-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-all-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-all-unsubmitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-all-unsubmitted-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-all-unsubmitted-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-available-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-available-unsubmitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-available-unsubmitted-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-available-unsubmitted-submitted-signaled:
- Statuses : 5 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-unsubmitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-unsubmitted-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-multi-wait-unsubmitted-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-query-bad-pad:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-query-illegal-handle:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-query-one-illegal-handle:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-query-zero-handles:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-signal-array:
- Statuses : 5 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-signal-bad-pad:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-signal-illegal-handle:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-signal-illegal-point:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-signal-one-illegal-handle:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-signal-zero-handles:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-single-wait-all-available-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-single-wait-all-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-single-wait-available-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-single-wait-unsubmitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-transfer-bad-pad:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-transfer-illegal-handle:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-transfer-non-existent-point:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-wait-bad-flags:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-wait-illegal-handle:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@invalid-wait-zero-handles:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-all-available-signaled:
- Statuses : 5 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-all-available-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-all-available-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-all-for-submit-available-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-all-for-submit-available-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-all-for-submit-available-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-all-for-submit-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-all-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-available-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-available-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-available-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-for-submit-available-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-for-submit-available-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-for-submit-available-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-for-submit-available-unsubmitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-for-submit-available-unsubmitted-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-for-submit-available-unsubmitted-submitted-signaled:
- Statuses : 5 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-for-submit-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-for-submit-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-for-submit-unsubmitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-for-submit-unsubmitted-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@multi-wait-submitted-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@reset-during-wait-for-submit:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@reset-multiple-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@reset-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@reset-unsignaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@signal:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@signal-point-0:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-all-available-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-all-available-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-all-for-submit-available-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-all-for-submit-available-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-all-for-submit-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-all-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-available-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-available-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-for-submit-available-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-for-submit-available-submitted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-for-submit-signaled:
- Statuses : 5 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@single-wait-signaled:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@transfer-timeline-point:
- Statuses : 5 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-all-complex:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-all-delayed-signal:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-all-for-submit-complex:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-all-for-submit-delayed-submit:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-all-for-submit-snapshot:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-all-interrupted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-all-snapshot:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-any-complex:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-any-interrupted:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-any-snapshot:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-delayed-signal:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-for-submit-complex:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-for-submit-delayed-submit:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@syncobj_timeline@wait-for-submit-snapshot:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
Known issues
------------
Here are the changes found in IGTPW_3293_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@kms_flip@2x-flip-vs-expired-vblank:
- shard-hsw: [PASS][2] -> [FAIL][3] ([fdo#102887])
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-hsw5/igt@kms_flip@2x-flip-vs-expired-vblank.html
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-hsw8/igt@kms_flip@2x-flip-vs-expired-vblank.html
* igt@kms_frontbuffer_tracking@fbc-stridechange:
- shard-iclb: [PASS][4] -> [FAIL][5] ([fdo#103167]) +6 similar issues
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-iclb6/igt@kms_frontbuffer_tracking@fbc-stridechange.html
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-stridechange.html
* igt@kms_psr2_su@page_flip:
- shard-iclb: [PASS][6] -> [SKIP][7] ([fdo#109642] / [fdo#111068])
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-iclb2/igt@kms_psr2_su@page_flip.html
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-iclb8/igt@kms_psr2_su@page_flip.html
* igt@kms_psr@psr2_dpms:
- shard-iclb: [PASS][8] -> [SKIP][9] ([fdo#109441]) +3 similar issues
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-iclb2/igt@kms_psr@psr2_dpms.html
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-iclb8/igt@kms_psr@psr2_dpms.html
* igt@kms_setmode@basic:
- shard-kbl: [PASS][10] -> [FAIL][11] ([fdo#99912])
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-kbl6/igt@kms_setmode@basic.html
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-kbl2/igt@kms_setmode@basic.html
* igt@kms_vblank@pipe-a-ts-continuation-suspend:
- shard-kbl: [PASS][12] -> [DMESG-WARN][13] ([fdo#108566]) +4 similar issues
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-kbl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-kbl2/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
- shard-apl: [PASS][14] -> [DMESG-WARN][15] ([fdo#108566]) +2 similar issues
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-apl6/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-apl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
#### Possible fixes ####
* igt@gem_tiled_swapping@non-threaded:
- shard-kbl: [DMESG-WARN][16] ([fdo#108686]) -> [PASS][17]
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-kbl3/igt@gem_tiled_swapping@non-threaded.html
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-kbl3/igt@gem_tiled_swapping@non-threaded.html
* igt@i915_selftest@live_hangcheck:
- shard-iclb: [INCOMPLETE][18] ([fdo#107713] / [fdo#108569]) -> [PASS][19]
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-iclb7/igt@i915_selftest@live_hangcheck.html
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-iclb6/igt@i915_selftest@live_hangcheck.html
* igt@kms_cursor_crc@pipe-b-cursor-suspend:
- shard-kbl: [DMESG-WARN][20] ([fdo#108566]) -> [PASS][21]
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-kbl3/igt@kms_cursor_crc@pipe-b-cursor-suspend.html
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-kbl2/igt@kms_cursor_crc@pipe-b-cursor-suspend.html
* igt@kms_cursor_crc@pipe-c-cursor-suspend:
- shard-kbl: [FAIL][22] ([fdo#103232]) -> [PASS][23]
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-kbl3/igt@kms_cursor_crc@pipe-c-cursor-suspend.html
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/shard-kbl3/igt@kms_cursor_crc@pipe-c-cursor-suspend.html
* igt@kms_flip@flip-vs-suspend:
- shard-hsw: [INCOMPLETE][24] ([fdo#103540]) -> [PASS][25]
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6549/shard-hsw4/igt@kms_flip@flip-vs-suspend.html
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3293/
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 21+ messages in thread