Skip to content

Commit 76ca3a2

Browse files
committed
drm/xe/uapi: Order sections
This patch doesn't modify any text or uapi entries themselves. It only moves things up and down, aiming at a better organization of the uAPI. While fixing the documentation I noticed that query_engine_cs_cycles was in the middle of the memory_region info. Then I noticed more mismatches in the order when compared to the order of the IOCTL and QUERY entries declaration. So this patch aims to bring some order to the uAPI so it gets easier to read and the documentation generated in the end is able to tell a consistent story. Overall order: 1. IOCTL definition 2. Extension definition and helper structs 3. IOCTL's Query structs in the order of the Query's entries. 4. The rest of IOCTL structs in the order of IOCTL declaration. 5. uEvents Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com> Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com> Acked-by: José Roberto de Souza <jose.souza@intel.com> Acked-by: Mateusz Naklicki <mateusz.naklicki@intel.com> Signed-off-by: Francois Dugast <francois.dugast@intel.com>
1 parent 801989b commit 76ca3a2

1 file changed

Lines changed: 130 additions & 122 deletions

File tree

include/uapi/drm/xe_drm.h

Lines changed: 130 additions & 122 deletions
Original file line numberDiff line numberDiff line change
@@ -12,19 +12,48 @@
1212
extern "C" {
1313
#endif
1414

15-
/* Please note that modifications to all structs defined here are
15+
/*
16+
* Please note that modifications to all structs defined here are
1617
* subject to backwards-compatibility constraints.
18+
* Sections in this file are organized as follows:
19+
* 1. IOCTL definition
20+
* 2. Extension definition and helper structs
21+
* 3. IOCTL's Query structs in the order of the Query's entries.
22+
* 4. The rest of IOCTL structs in the order of IOCTL declaration.
23+
* 5. uEvents
1724
*/
1825

19-
/**
20-
* DOC: uevent generated by xe on it's pci node.
26+
/*
27+
* xe specific ioctls.
2128
*
22-
* DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
23-
* fails. The value supplied with the event is always "NEEDS_RESET".
24-
* Additional information supplied is tile id and gt id of the gt unit for
25-
* which reset has failed.
29+
* The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
30+
* [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
31+
* against DRM_COMMAND_BASE and should be between [0x0, 0x60).
2632
*/
27-
#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
33+
#define DRM_XE_DEVICE_QUERY 0x00
34+
#define DRM_XE_GEM_CREATE 0x01
35+
#define DRM_XE_GEM_MMAP_OFFSET 0x02
36+
#define DRM_XE_VM_CREATE 0x03
37+
#define DRM_XE_VM_DESTROY 0x04
38+
#define DRM_XE_VM_BIND 0x05
39+
#define DRM_XE_EXEC_QUEUE_CREATE 0x06
40+
#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
41+
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
42+
#define DRM_XE_EXEC 0x09
43+
#define DRM_XE_WAIT_USER_FENCE 0x0a
44+
/* Must be kept compact -- no holes */
45+
46+
#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
47+
#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
48+
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
49+
#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
50+
#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
51+
#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
52+
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
53+
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
54+
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
55+
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
56+
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
2857

2958
/**
3059
* struct drm_xe_user_extension - Base class for defining a chain of extensions
@@ -90,37 +119,25 @@ struct drm_xe_user_extension {
90119
__u32 pad;
91120
};
92121

93-
/*
94-
* xe specific ioctls.
95-
*
96-
* The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
97-
* [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
98-
* against DRM_COMMAND_BASE and should be between [0x0, 0x60).
122+
/**
123+
* struct drm_xe_ext_set_property - XE set property extension
99124
*/
100-
#define DRM_XE_DEVICE_QUERY 0x00
101-
#define DRM_XE_GEM_CREATE 0x01
102-
#define DRM_XE_GEM_MMAP_OFFSET 0x02
103-
#define DRM_XE_VM_CREATE 0x03
104-
#define DRM_XE_VM_DESTROY 0x04
105-
#define DRM_XE_VM_BIND 0x05
106-
#define DRM_XE_EXEC_QUEUE_CREATE 0x06
107-
#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
108-
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
109-
#define DRM_XE_EXEC 0x09
110-
#define DRM_XE_WAIT_USER_FENCE 0x0a
111-
/* Must be kept compact -- no holes */
125+
struct drm_xe_ext_set_property {
126+
/** @base: base user extension */
127+
struct drm_xe_user_extension base;
112128

113-
#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
114-
#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
115-
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
116-
#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
117-
#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
118-
#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
119-
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
120-
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
121-
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
122-
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
123-
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
129+
/** @property: property to set */
130+
__u32 property;
131+
132+
/** @pad: MBZ */
133+
__u32 pad;
134+
135+
/** @value: property value */
136+
__u64 value;
137+
138+
/** @reserved: Reserved */
139+
__u64 reserved[2];
140+
};
124141

125142
/**
126143
* struct drm_xe_engine_class_instance - instance of an engine class
@@ -274,57 +291,6 @@ struct drm_xe_mem_region {
274291
__u64 reserved[6];
275292
};
276293

277-
/**
278-
* struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
279-
*
280-
* If a query is made with a struct drm_xe_device_query where .query is equal to
281-
* DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
282-
* in .data. struct drm_xe_query_engine_cycles is allocated by the user and
283-
* .data points to this allocated structure.
284-
*
285-
* The query returns the engine cycles, which along with GT's @reference_clock,
286-
* can be used to calculate the engine timestamp. In addition the
287-
* query returns a set of cpu timestamps that indicate when the command
288-
* streamer cycle count was captured.
289-
*/
290-
struct drm_xe_query_engine_cycles {
291-
/**
292-
* @eci: This is input by the user and is the engine for which command
293-
* streamer cycles is queried.
294-
*/
295-
struct drm_xe_engine_class_instance eci;
296-
297-
/**
298-
* @clockid: This is input by the user and is the reference clock id for
299-
* CPU timestamp. For definition, see clock_gettime(2) and
300-
* perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
301-
* CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
302-
*/
303-
__s32 clockid;
304-
305-
/** @width: Width of the engine cycle counter in bits. */
306-
__u32 width;
307-
308-
/**
309-
* @engine_cycles: Engine cycles as read from its register
310-
* at 0x358 offset.
311-
*/
312-
__u64 engine_cycles;
313-
314-
/**
315-
* @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
316-
* reading the engine_cycles register using the reference clockid set by the
317-
* user.
318-
*/
319-
__u64 cpu_timestamp;
320-
321-
/**
322-
* @cpu_delta: Time delta in ns captured around reading the lower dword
323-
* of the engine_cycles register.
324-
*/
325-
__u64 cpu_delta;
326-
};
327-
328294
/**
329295
* struct drm_xe_query_mem_regions - describe memory regions
330296
*
@@ -482,6 +448,57 @@ struct drm_xe_query_topology_mask {
482448
__u8 mask[];
483449
};
484450

451+
/**
452+
* struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
453+
*
454+
* If a query is made with a struct drm_xe_device_query where .query is equal to
455+
* DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
456+
* in .data. struct drm_xe_query_engine_cycles is allocated by the user and
457+
* .data points to this allocated structure.
458+
*
459+
* The query returns the engine cycles, which along with GT's @reference_clock,
460+
* can be used to calculate the engine timestamp. In addition the
461+
* query returns a set of cpu timestamps that indicate when the command
462+
* streamer cycle count was captured.
463+
*/
464+
struct drm_xe_query_engine_cycles {
465+
/**
466+
* @eci: This is input by the user and is the engine for which command
467+
* streamer cycles is queried.
468+
*/
469+
struct drm_xe_engine_class_instance eci;
470+
471+
/**
472+
* @clockid: This is input by the user and is the reference clock id for
473+
* CPU timestamp. For definition, see clock_gettime(2) and
474+
* perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
475+
* CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
476+
*/
477+
__s32 clockid;
478+
479+
/** @width: Width of the engine cycle counter in bits. */
480+
__u32 width;
481+
482+
/**
483+
* @engine_cycles: Engine cycles as read from its register
484+
* at 0x358 offset.
485+
*/
486+
__u64 engine_cycles;
487+
488+
/**
489+
* @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
490+
* reading the engine_cycles register using the reference clockid set by the
491+
* user.
492+
*/
493+
__u64 cpu_timestamp;
494+
495+
/**
496+
* @cpu_delta: Time delta in ns captured around reading the lower dword
497+
* of the engine_cycles register.
498+
*/
499+
__u64 cpu_delta;
500+
};
501+
485502
/**
486503
* struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
487504
* structure to query device information
@@ -668,26 +685,6 @@ struct drm_xe_gem_mmap_offset {
668685
__u64 reserved[2];
669686
};
670687

671-
/**
672-
* struct drm_xe_ext_set_property - XE set property extension
673-
*/
674-
struct drm_xe_ext_set_property {
675-
/** @base: base user extension */
676-
struct drm_xe_user_extension base;
677-
678-
/** @property: property to set */
679-
__u32 property;
680-
681-
/** @pad: MBZ */
682-
__u32 pad;
683-
684-
/** @value: property value */
685-
__u64 value;
686-
687-
/** @reserved: Reserved */
688-
__u64 reserved[2];
689-
};
690-
691688
/**
692689
* struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
693690
*
@@ -976,6 +973,20 @@ struct drm_xe_exec_queue_create {
976973
__u64 reserved[2];
977974
};
978975

976+
/**
977+
* struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
978+
*/
979+
struct drm_xe_exec_queue_destroy {
980+
/** @exec_queue_id: Exec queue ID */
981+
__u32 exec_queue_id;
982+
983+
/** @pad: MBZ */
984+
__u32 pad;
985+
986+
/** @reserved: Reserved */
987+
__u64 reserved[2];
988+
};
989+
979990
/**
980991
* struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
981992
*
@@ -1000,20 +1011,6 @@ struct drm_xe_exec_queue_get_property {
10001011
__u64 reserved[2];
10011012
};
10021013

1003-
/**
1004-
* struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
1005-
*/
1006-
struct drm_xe_exec_queue_destroy {
1007-
/** @exec_queue_id: Exec queue ID */
1008-
__u32 exec_queue_id;
1009-
1010-
/** @pad: MBZ */
1011-
__u32 pad;
1012-
1013-
/** @reserved: Reserved */
1014-
__u64 reserved[2];
1015-
};
1016-
10171014
/**
10181015
* struct drm_xe_sync - sync object
10191016
*
@@ -1180,6 +1177,17 @@ struct drm_xe_wait_user_fence {
11801177
/** @reserved: Reserved */
11811178
__u64 reserved[2];
11821179
};
1180+
1181+
/**
1182+
* DOC: uevent generated by xe on it's pci node.
1183+
*
1184+
* DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
1185+
* fails. The value supplied with the event is always "NEEDS_RESET".
1186+
* Additional information supplied is tile id and gt id of the gt unit for
1187+
* which reset has failed.
1188+
*/
1189+
#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
1190+
11831191
#if defined(__cplusplus)
11841192
}
11851193
#endif

0 commit comments

Comments (0)