@@ -76,6 +76,8 @@ struct drm_nouveau_channel_free {
 #define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
 #define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
 #define NOUVEAU_GEM_DOMAIN_COHERENT  (1 << 4)
+/* The BO will never be shared via import or export. */
+#define NOUVEAU_GEM_DOMAIN_NO_SHARE  (1 << 5)
 
 #define NOUVEAU_GEM_TILE_COMP        0x00030000 /* nv50-only */
 #define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
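The new domain bit is aimed at BOs that stay private to one client, for example VM_BIND-only allocations. Below is a minimal, illustrative sketch of requesting such a BO; it assumes an already open render-node fd, that the updated header is visible as <drm/nouveau_drm.h>, and it relies on the pre-existing struct drm_nouveau_gem_new / DRM_IOCTL_NOUVEAU_GEM_NEW interface, which is not part of this hunk.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>	/* assumed install location of this header */

/* Sketch: allocate a 1 MiB VRAM BO that will never be imported/exported. */
static int alloc_private_vram_bo(int fd, uint32_t *handle)
{
	struct drm_nouveau_gem_new req = {
		.info.size   = 1 << 20,
		.info.domain = NOUVEAU_GEM_DOMAIN_VRAM |
			       NOUVEAU_GEM_DOMAIN_NO_SHARE,
	};

	if (ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_NEW, &req))
		return -1;	/* errno holds the failure reason */

	*handle = req.info.handle;
	return 0;
}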
@@ -164,6 +166,215 @@ struct drm_nouveau_gem_cpu_fini {
 	__u32 handle;
 };
 
+/**
+ * struct drm_nouveau_sync - sync object
+ *
+ * This structure serves as synchronization mechanism for (potentially)
+ * asynchronous operations such as EXEC or VM_BIND.
+ */
+struct drm_nouveau_sync {
+	/**
+	 * @flags: the flags for a sync object
+	 *
+	 * The first 8 bits are used to determine the type of the sync object.
+	 */
+	__u32 flags;
+#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
+#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
+#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
+	/**
+	 * @handle: the handle of the sync object
+	 */
+	__u32 handle;
+	/**
+	 * @timeline_value:
+	 *
+	 * The timeline point of the sync object in case the syncobj is of
+	 * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
+	 */
+	__u64 timeline_value;
+};
+
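For illustration, a sketch of describing one binary syncobj to wait on and one timeline point to signal; the handles wait_syncobj and sig_syncobj are assumptions standing in for syncobjs created beforehand through the generic DRM syncobj ioctls, and the timeline value is an arbitrary example.

/* Sketch: one binary syncobj to wait on, one timeline point to signal. */
struct drm_nouveau_sync wait = {
	.flags  = DRM_NOUVEAU_SYNC_SYNCOBJ,
	.handle = wait_syncobj,		/* assumed pre-created binary syncobj */
};

struct drm_nouveau_sync sig = {
	.flags          = DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ,
	.handle         = sig_syncobj,	/* assumed pre-created timeline syncobj */
	.timeline_value = 42,		/* arbitrary example point */
};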
+/**
+ * struct drm_nouveau_vm_init - GPU VA space init structure
+ *
+ * Used to initialize the GPU's VA space for a user client, telling the kernel
+ * which portion of the VA space is managed by the UMD and kernel respectively.
+ *
+ * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
+ * channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
+ * with -ENOSYS.
+ */
+struct drm_nouveau_vm_init {
+	/**
+	 * @kernel_managed_addr: start address of the kernel managed VA space
+	 * region
+	 */
+	__u64 kernel_managed_addr;
+	/**
+	 * @kernel_managed_size: size of the kernel managed VA space region in
+	 * bytes
+	 */
+	__u64 kernel_managed_size;
+};
+
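A sketch of the corresponding call, issued once per client before any BO or channel is created. The split between UMD- and kernel-managed VA is up to the UMD; the values below are arbitrary examples that hand the top 4 GiB of a 40-bit VA space to the kernel. The same includes as in the earlier sketch are assumed.

/* Sketch: reserve the top 4 GiB of a 40-bit VA space for the kernel. */
static int init_vm(int fd)
{
	struct drm_nouveau_vm_init init = {
		.kernel_managed_addr = 0xff00000000ull,	/* example value */
		.kernel_managed_size = 0x0100000000ull,	/* example value */
	};

	return ioctl(fd, DRM_IOCTL_NOUVEAU_VM_INIT, &init);
}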
+/**
+ * struct drm_nouveau_vm_bind_op - VM_BIND operation
+ *
+ * This structure represents a single VM_BIND operation. UMDs should pass
+ * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
+ */
+struct drm_nouveau_vm_bind_op {
+	/**
+	 * @op: the operation type
+	 */
+	__u32 op;
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_MAP:
+ *
+ * Map a GEM object to the GPU's VA space. Optionally, the
+ * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
+ * create sparse mappings for the given range.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
+ *
+ * Unmap an existing mapping in the GPU's VA space. If the region the mapping
+ * is located in is a sparse region, new sparse mappings are created where the
+ * unmapped (memory backed) mapping was mapped previously. To remove a sparse
+ * region the &DRM_NOUVEAU_VM_BIND_SPARSE flag must be set.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
+	/**
+	 * @flags: the flags for a &drm_nouveau_vm_bind_op
+	 */
+	__u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_SPARSE:
+ *
+ * Indicates that an allocated VA space region should be sparse.
+ */
+#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
+	/**
+	 * @handle: the handle of the DRM GEM object to map
+	 */
+	__u32 handle;
+	/**
+	 * @pad: 32 bit padding, should be 0
+	 */
+	__u32 pad;
+	/**
+	 * @addr:
+	 *
+	 * the address the VA space region or (memory backed) mapping should be mapped to
+	 */
+	__u64 addr;
+	/**
+	 * @bo_offset: the offset within the BO backing the mapping
+	 */
+	__u64 bo_offset;
+	/**
+	 * @range: the size of the requested mapping in bytes
+	 */
+	__u64 range;
+};
+
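A sketch of a single MAP operation that maps a whole BO at a UMD-chosen address; bo_handle, bo_size and gpu_va are placeholders assumed to come from the UMD's allocator.

/* Sketch: map one GEM object at a UMD-managed virtual address. */
struct drm_nouveau_vm_bind_op op = {
	.op        = DRM_NOUVEAU_VM_BIND_OP_MAP,
	.handle    = bo_handle,	/* e.g. from DRM_IOCTL_NOUVEAU_GEM_NEW */
	.addr      = gpu_va,	/* target VA in the UMD-managed region */
	.bo_offset = 0,		/* map from the start of the BO */
	.range     = bo_size,	/* map the whole BO */
};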
+/**
+ * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
+ */
+struct drm_nouveau_vm_bind {
+	/**
+	 * @op_count: the number of &drm_nouveau_vm_bind_op
+	 */
+	__u32 op_count;
+	/**
+	 * @flags: the flags for a &drm_nouveau_vm_bind ioctl
+	 */
+	__u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
+ *
+ * Indicates that the given VM_BIND operation should be executed asynchronously
+ * by the kernel.
+ *
+ * If this flag is not supplied the kernel executes the associated operations
+ * synchronously and doesn't accept any &drm_nouveau_sync objects.
+ */
+#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
+	/**
+	 * @wait_count: the number of wait &drm_nouveau_syncs
+	 */
+	__u32 wait_count;
+	/**
+	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+	 */
+	__u32 sig_count;
+	/**
+	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+	 */
+	__u64 wait_ptr;
+	/**
+	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+	 */
+	__u64 sig_ptr;
+	/**
+	 * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
+	 */
+	__u64 op_ptr;
+};
+
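Putting it together, a sketch of an asynchronous bind that executes the MAP operation sketched above and signals a timeline point on completion; op and sig refer to the earlier sketches, fd is the open DRM fd, and the same includes are assumed.

/* Sketch: submit one MAP op asynchronously, signalling `sig` when done. */
struct drm_nouveau_vm_bind bind = {
	.flags     = DRM_NOUVEAU_VM_BIND_RUN_ASYNC,
	.op_count  = 1,
	.op_ptr    = (__u64)(uintptr_t)&op,	/* array of one bind op */
	.sig_count = 1,
	.sig_ptr   = (__u64)(uintptr_t)&sig,	/* signalled when the bind finishes */
};

int ret = ioctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);	/* 0 on success */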
+/**
+ * struct drm_nouveau_exec_push - EXEC push operation
+ *
+ * This structure represents a single EXEC push operation. UMDs should pass an
+ * array of this structure via struct drm_nouveau_exec's &push_ptr field.
+ */
+struct drm_nouveau_exec_push {
+	/**
+	 * @va: the virtual address of the push buffer mapping
+	 */
+	__u64 va;
+	/**
+	 * @va_len: the length of the push buffer mapping
+	 */
+	__u64 va_len;
+};
+
+/**
+ * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
+ */
+struct drm_nouveau_exec {
+	/**
+	 * @channel: the channel to execute the push buffer in
+	 */
+	__u32 channel;
+	/**
+	 * @push_count: the number of &drm_nouveau_exec_push ops
+	 */
+	__u32 push_count;
+	/**
+	 * @wait_count: the number of wait &drm_nouveau_syncs
+	 */
+	__u32 wait_count;
+	/**
+	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+	 */
+	__u32 sig_count;
+	/**
+	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+	 */
+	__u64 wait_ptr;
+	/**
+	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+	 */
+	__u64 sig_ptr;
+	/**
+	 * @push_ptr: pointer to &drm_nouveau_exec_push ops
+	 */
+	__u64 push_ptr;
+};
+
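Finally, a sketch of submitting one push buffer on a previously allocated channel; channel_id is assumed to come from DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC, pb_va/pb_len are placeholders describing commands in a pushbuf mapped via VM_BIND beforehand, and wait/sig are the drm_nouveau_sync entries sketched earlier.

/* Sketch: execute one pushbuf, waiting on `wait` and signalling `sig`. */
struct drm_nouveau_exec_push push = {
	.va     = pb_va,	/* VA of the pushbuf mapping (set up via VM_BIND) */
	.va_len = pb_len,	/* length of the commands to execute */
};

struct drm_nouveau_exec exec = {
	.channel    = channel_id,
	.push_count = 1,
	.push_ptr   = (__u64)(uintptr_t)&push,
	.wait_count = 1,
	.wait_ptr   = (__u64)(uintptr_t)&wait,
	.sig_count  = 1,
	.sig_ptr    = (__u64)(uintptr_t)&sig,
};

int ret = ioctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);	/* 0 on success */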
 #define DRM_NOUVEAU_GETPARAM           0x00
 #define DRM_NOUVEAU_SETPARAM           0x01 /* deprecated */
 #define DRM_NOUVEAU_CHANNEL_ALLOC      0x02
@@ -174,6 +385,9 @@ struct drm_nouveau_gem_cpu_fini {
 #define DRM_NOUVEAU_NVIF               0x07
 #define DRM_NOUVEAU_SVM_INIT           0x08
 #define DRM_NOUVEAU_SVM_BIND           0x09
+#define DRM_NOUVEAU_VM_INIT            0x10
+#define DRM_NOUVEAU_VM_BIND            0x11
+#define DRM_NOUVEAU_EXEC               0x12
 #define DRM_NOUVEAU_GEM_NEW            0x40
 #define DRM_NOUVEAU_GEM_PUSHBUF        0x41
 #define DRM_NOUVEAU_GEM_CPU_PREP       0x42
@@ -239,6 +453,9 @@ struct drm_nouveau_svm_bind {
 #define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
 #define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
 
+#define DRM_IOCTL_NOUVEAU_VM_INIT            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
+#define DRM_IOCTL_NOUVEAU_VM_BIND            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
+#define DRM_IOCTL_NOUVEAU_EXEC               DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
 #if defined(__cplusplus)
 }
 #endif