/*
 * Copyright 2005 Stephane Marchesin.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __NOUVEAU_DRM_H__
#define __NOUVEAU_DRM_H__

#define DRM_NOUVEAU_EVENT_NVIF                                       0x80000000

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define NOUVEAU_GETPARAM_PCI_VENDOR      3
#define NOUVEAU_GETPARAM_PCI_DEVICE      4
#define NOUVEAU_GETPARAM_BUS_TYPE        5
#define NOUVEAU_GETPARAM_FB_SIZE         8
#define NOUVEAU_GETPARAM_AGP_SIZE        9
#define NOUVEAU_GETPARAM_CHIPSET_ID      11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
#define NOUVEAU_GETPARAM_GRAPH_UNITS     13
#define NOUVEAU_GETPARAM_PTIMER_TIME     14
#define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16

/*
 * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
 *
 * Query the maximum number of IBs that can be pushed through a single
 * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
 * ioctl().
 */
#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX   17

/*
 * NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
 *
 * Query the VRAM BAR size.
 */
#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18

/*
 * NOUVEAU_GETPARAM_VRAM_USED
 *
 * Get remaining VRAM size.
 */
#define NOUVEAU_GETPARAM_VRAM_USED 19

/*
 * NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
 *
 * Query whether tile mode and PTE kind are accepted with VM allocs or not.
 */
#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20

struct drm_nouveau_getparam {
	__u64 param;
	__u64 value;
};
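
/*
 * Usage sketch (illustrative only, not part of the uAPI): query the chipset
 * ID via DRM_IOCTL_NOUVEAU_GETPARAM. Assumes "fd" is an already open nouveau
 * DRM device file descriptor; error handling is omitted.
 *
 *	struct drm_nouveau_getparam gp = {
 *		.param = NOUVEAU_GETPARAM_CHIPSET_ID,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp) == 0)
 *		printf("chipset: 0x%llx\n", (unsigned long long)gp.value);
 */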

struct drm_nouveau_channel_alloc {
	__u32     fb_ctxdma_handle;
	__u32     tt_ctxdma_handle;

	__s32     channel;
	__u32     pushbuf_domains;

	/* Notifier memory */
	__u32     notifier_handle;

	/* DRM-enforced subchannel assignments */
	struct {
		__u32 handle;
		__u32 grclass;
	} subchan[8];
	__u32 nr_subchan;
};

struct drm_nouveau_channel_free {
	__s32 channel;
};

#define NOUVEAU_GEM_DOMAIN_CPU       (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT  (1 << 4)
/* The BO will never be shared via import or export. */
#define NOUVEAU_GEM_DOMAIN_NO_SHARE  (1 << 5)

#define NOUVEAU_GEM_TILE_COMP        0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
#define NOUVEAU_GEM_TILE_16BPP       0x00000001
#define NOUVEAU_GEM_TILE_32BPP       0x00000002
#define NOUVEAU_GEM_TILE_ZETA        0x00000004
#define NOUVEAU_GEM_TILE_NONCONTIG   0x00000008

struct drm_nouveau_gem_info {
	__u32 handle;
	__u32 domain;
	__u64 size;
	__u64 offset;
	__u64 map_handle;
	__u32 tile_mode;
	__u32 tile_flags;
};

struct drm_nouveau_gem_new {
	struct drm_nouveau_gem_info info;
	__u32 channel_hint;
	__u32 align;
};
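
/*
 * Usage sketch (illustrative only): allocate a 64 KiB, page-aligned buffer
 * in VRAM with DRM_IOCTL_NOUVEAU_GEM_NEW. "fd" is assumed to be an open
 * nouveau DRM device file descriptor; error handling is omitted.
 *
 *	struct drm_nouveau_gem_new req = { 0 };
 *
 *	req.info.size = 64 * 1024;
 *	req.info.domain = NOUVEAU_GEM_DOMAIN_VRAM;
 *	req.align = 0x1000;
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_NEW, &req);
 *
 * On success, req.info.handle names the newly created GEM object.
 */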

#define NOUVEAU_GEM_MAX_BUFFERS 1024
struct drm_nouveau_gem_pushbuf_bo_presumed {
	__u32 valid;
	__u32 domain;
	__u64 offset;
};

struct drm_nouveau_gem_pushbuf_bo {
	__u64 user_priv;
	__u32 handle;
	__u32 read_domains;
	__u32 write_domains;
	__u32 valid_domains;
	struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
};

#define NOUVEAU_GEM_RELOC_LOW  (1 << 0)
#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
#define NOUVEAU_GEM_RELOC_OR   (1 << 2)
#define NOUVEAU_GEM_MAX_RELOCS 1024
struct drm_nouveau_gem_pushbuf_reloc {
	__u32 reloc_bo_index;
	__u32 reloc_bo_offset;
	__u32 bo_index;
	__u32 flags;
	__u32 data;
	__u32 vor;
	__u32 tor;
};

#define NOUVEAU_GEM_MAX_PUSH 512
struct drm_nouveau_gem_pushbuf_push {
	__u32 bo_index;
	__u32 pad;
	__u64 offset;
	__u64 length;
#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
};

struct drm_nouveau_gem_pushbuf {
	__u32 channel;
	__u32 nr_buffers;
	__u64 buffers;
	__u32 nr_relocs;
	__u32 nr_push;
	__u64 relocs;
	__u64 push;
	__u32 suffix0;
	__u32 suffix1;
#define NOUVEAU_GEM_PUSHBUF_SYNC                                    (1ULL << 0)
	__u64 vram_available;
	__u64 gart_available;
};

#define NOUVEAU_GEM_CPU_PREP_NOWAIT                                  0x00000001
#define NOUVEAU_GEM_CPU_PREP_WRITE                                   0x00000004
struct drm_nouveau_gem_cpu_prep {
	__u32 handle;
	__u32 flags;
};

struct drm_nouveau_gem_cpu_fini {
	__u32 handle;
};

/**
 * struct drm_nouveau_sync - sync object
 *
 * This structure serves as a synchronization mechanism for (potentially)
 * asynchronous operations such as EXEC or VM_BIND.
 */
struct drm_nouveau_sync {
	/**
	 * @flags: the flags for a sync object
	 *
	 * The first 8 bits are used to determine the type of the sync object.
	 */
	__u32 flags;
#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
	/**
	 * @handle: the handle of the sync object
	 */
	__u32 handle;
	/**
	 * @timeline_value:
	 *
	 * The timeline point of the sync object in case the syncobj is of
	 * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};
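
/*
 * Example (illustrative only): a wait entry referring to point 42 of a
 * timeline syncobj. "syncobj_handle" is assumed to be a DRM syncobj handle
 * created beforehand (e.g. through DRM_IOCTL_SYNCOBJ_CREATE).
 *
 *	struct drm_nouveau_sync wait = {
 *		.flags = DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ,
 *		.handle = syncobj_handle,
 *		.timeline_value = 42,
 *	};
 */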

/**
 * struct drm_nouveau_vm_init - GPU VA space init structure
 *
 * Used to initialize the GPU's VA space for a user client, telling the kernel
 * which portion of the VA space is managed by the UMD and kernel respectively.
 *
 * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
 * channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
 * with -ENOSYS.
 */
struct drm_nouveau_vm_init {
	/**
	 * @kernel_managed_addr: start address of the kernel managed VA space
	 * region
	 */
	__u64 kernel_managed_addr;
	/**
	 * @kernel_managed_size: size of the kernel managed VA space region in
	 * bytes
	 */
	__u64 kernel_managed_size;
};
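
/*
 * Usage sketch (illustrative only): reserve the low part of the VA space for
 * kernel managed mappings before any BO or channel is created. The split
 * shown here (first 4 GiB kernel managed) is an arbitrary example chosen by
 * the caller, not a recommendation.
 *
 *	struct drm_nouveau_vm_init init = {
 *		.kernel_managed_addr = 0,
 *		.kernel_managed_size = 1ULL << 32,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_VM_INIT, &init);
 */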

/**
 * struct drm_nouveau_vm_bind_op - VM_BIND operation
 *
 * This structure represents a single VM_BIND operation. UMDs should pass
 * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
 */
struct drm_nouveau_vm_bind_op {
	/**
	 * @op: the operation type
	 */
	__u32 op;
/**
 * @DRM_NOUVEAU_VM_BIND_OP_MAP:
 *
 * Map a GEM object to the GPU's VA space. Optionally, the
 * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
 * create sparse mappings for the given range.
 */
#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
/**
 * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
 *
 * Unmap an existing mapping in the GPU's VA space. If the mapping lies within
 * a sparse region, new sparse mappings are created where the unmapped (memory
 * backed) mapping was previously mapped. To remove a sparse region, the
 * &DRM_NOUVEAU_VM_BIND_SPARSE flag must be set.
 */
#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
	/**
	 * @flags: the flags for a &drm_nouveau_vm_bind_op
	 */
	__u32 flags;
/**
 * @DRM_NOUVEAU_VM_BIND_SPARSE:
 *
 * Indicates that an allocated VA space region should be sparse.
 */
#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
	/**
	 * @handle: the handle of the DRM GEM object to map
	 */
	__u32 handle;
	/**
	 * @pad: 32 bit padding, should be 0
	 */
	__u32 pad;
	/**
	 * @addr:
	 *
	 * The address the VA space region or (memory backed) mapping should be
	 * mapped to.
	 */
	__u64 addr;
	/**
	 * @bo_offset: the offset within the BO backing the mapping
	 */
	__u64 bo_offset;
	/**
	 * @range: the size of the requested mapping in bytes
	 */
	__u64 range;
};
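
/*
 * Example (illustrative only): a single map operation placing a GEM object
 * ("bo_handle", assumed to exist) at VA 0x100000000 with a length of 64 KiB,
 * starting at offset 0 within the BO.
 *
 *	struct drm_nouveau_vm_bind_op op = {
 *		.op = DRM_NOUVEAU_VM_BIND_OP_MAP,
 *		.handle = bo_handle,
 *		.addr = 0x100000000ULL,
 *		.bo_offset = 0,
 *		.range = 64 * 1024,
 *	};
 */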

/**
 * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
 */
struct drm_nouveau_vm_bind {
	/**
	 * @op_count: the number of &drm_nouveau_vm_bind_op
	 */
	__u32 op_count;
	/**
	 * @flags: the flags for a &drm_nouveau_vm_bind ioctl
	 */
	__u32 flags;
/**
 * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
 *
 * Indicates that the given VM_BIND operation should be executed asynchronously
 * by the kernel.
 *
 * If this flag is not supplied the kernel executes the associated operations
 * synchronously and doesn't accept any &drm_nouveau_sync objects.
 */
#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
	/**
	 * @wait_count: the number of wait &drm_nouveau_syncs
	 */
	__u32 wait_count;
	/**
	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
	 */
	__u32 sig_count;
	/**
	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
	 */
	__u64 wait_ptr;
	/**
	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
	 */
	__u64 sig_ptr;
	/**
	 * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
	 */
	__u64 op_ptr;
};
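
/*
 * Usage sketch (illustrative only): submit the map operation above
 * asynchronously, signalling one sync object ("sig", a struct
 * drm_nouveau_sync prepared by the caller) once the bind has completed.
 *
 *	struct drm_nouveau_vm_bind bind = {
 *		.op_count = 1,
 *		.op_ptr = (__u64)(uintptr_t)&op,
 *		.flags = DRM_NOUVEAU_VM_BIND_RUN_ASYNC,
 *		.sig_count = 1,
 *		.sig_ptr = (__u64)(uintptr_t)&sig,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
 */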

/**
 * struct drm_nouveau_exec_push - EXEC push operation
 *
 * This structure represents a single EXEC push operation. UMDs should pass an
 * array of this structure via struct drm_nouveau_exec's &push_ptr field.
 */
struct drm_nouveau_exec_push {
	/**
	 * @va: the virtual address of the push buffer mapping
	 */
	__u64 va;
	/**
	 * @va_len: the length of the push buffer mapping
	 */
	__u32 va_len;
	/**
	 * @flags: the flags for this push buffer mapping
	 */
	__u32 flags;
#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
};

/**
 * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
 */
struct drm_nouveau_exec {
	/**
	 * @channel: the channel to execute the push buffer in
	 */
	__u32 channel;
	/**
	 * @push_count: the number of &drm_nouveau_exec_push ops
	 */
	__u32 push_count;
	/**
	 * @wait_count: the number of wait &drm_nouveau_syncs
	 */
	__u32 wait_count;
	/**
	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
	 */
	__u32 sig_count;
	/**
	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
	 */
	__u64 wait_ptr;
	/**
	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
	 */
	__u64 sig_ptr;
	/**
	 * @push_ptr: pointer to &drm_nouveau_exec_push ops
	 */
	__u64 push_ptr;
};
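
/*
 * Usage sketch (illustrative only): execute a single push buffer that was
 * previously mapped at "push_va" with length "push_len" via VM_BIND, on
 * channel "chan" (all three assumed to be set up by the caller), waiting for
 * and signalling no sync objects.
 *
 *	struct drm_nouveau_exec_push push = {
 *		.va = push_va,
 *		.va_len = push_len,
 *	};
 *
 *	struct drm_nouveau_exec exec = {
 *		.channel = chan,
 *		.push_count = 1,
 *		.push_ptr = (__u64)(uintptr_t)&push,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);
 */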
414e0223899SDanilo Krummrich 
4157a5d5f9cSDave Airlie #define DRM_NOUVEAU_GETPARAM           0x00
416718dceddSDavid Howells #define DRM_NOUVEAU_SETPARAM           0x01 /* deprecated */
4177a5d5f9cSDave Airlie #define DRM_NOUVEAU_CHANNEL_ALLOC      0x02
4187a5d5f9cSDave Airlie #define DRM_NOUVEAU_CHANNEL_FREE       0x03
419718dceddSDavid Howells #define DRM_NOUVEAU_GROBJ_ALLOC        0x04 /* deprecated */
420718dceddSDavid Howells #define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x05 /* deprecated */
421718dceddSDavid Howells #define DRM_NOUVEAU_GPUOBJ_FREE        0x06 /* deprecated */
42227111a23SBen Skeggs #define DRM_NOUVEAU_NVIF               0x07
423eeaf06acSBen Skeggs #define DRM_NOUVEAU_SVM_INIT           0x08
424f180bf12SJérôme Glisse #define DRM_NOUVEAU_SVM_BIND           0x09
425e0223899SDanilo Krummrich #define DRM_NOUVEAU_VM_INIT            0x10
426e0223899SDanilo Krummrich #define DRM_NOUVEAU_VM_BIND            0x11
427e0223899SDanilo Krummrich #define DRM_NOUVEAU_EXEC               0x12
428718dceddSDavid Howells #define DRM_NOUVEAU_GEM_NEW            0x40
429718dceddSDavid Howells #define DRM_NOUVEAU_GEM_PUSHBUF        0x41
430718dceddSDavid Howells #define DRM_NOUVEAU_GEM_CPU_PREP       0x42
431718dceddSDavid Howells #define DRM_NOUVEAU_GEM_CPU_FINI       0x43
432718dceddSDavid Howells #define DRM_NOUVEAU_GEM_INFO           0x44
433718dceddSDavid Howells 
struct drm_nouveau_svm_init {
	__u64 unmanaged_addr;
	__u64 unmanaged_size;
};
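
/*
 * Usage sketch (illustrative only): enable SVM while keeping an address
 * window for regular (non-SVM) mappings. The window below is an arbitrary
 * example chosen by the caller, not a recommendation.
 *
 *	struct drm_nouveau_svm_init init = {
 *		.unmanaged_addr = 0,
 *		.unmanaged_size = 1ULL << 32,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_SVM_INIT, &init);
 */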

struct drm_nouveau_svm_bind {
	__u64 header;
	__u64 va_start;
	__u64 va_end;
	__u64 npages;
	__u64 stride;
	__u64 result;
	__u64 reserved0;
	__u64 reserved1;
};

#define NOUVEAU_SVM_BIND_COMMAND_SHIFT          0
#define NOUVEAU_SVM_BIND_COMMAND_BITS           8
#define NOUVEAU_SVM_BIND_COMMAND_MASK           ((1 << 8) - 1)
#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT         8
#define NOUVEAU_SVM_BIND_PRIORITY_BITS          8
#define NOUVEAU_SVM_BIND_PRIORITY_MASK          ((1 << 8) - 1)
#define NOUVEAU_SVM_BIND_TARGET_SHIFT           16
#define NOUVEAU_SVM_BIND_TARGET_BITS            32
#define NOUVEAU_SVM_BIND_TARGET_MASK            0xffffffff

/*
 * Below is used to validate ioctl arguments; userspace can also use it to
 * make sure that no bits are set beyond the known fields for a given kernel
 * version.
 */
#define NOUVEAU_SVM_BIND_VALID_BITS     48
#define NOUVEAU_SVM_BIND_VALID_MASK     ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)

/*
 * NOUVEAU_SVM_BIND_COMMAND__MIGRATE: synchronously migrate to the target memory.
 * result: number of pages successfully migrated to the target memory.
 */
#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE               0

/*
 * NOUVEAU_SVM_BIND_TARGET__GPU_VRAM: target the GPU VRAM memory.
 */
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM               (1UL << 31)
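
/*
 * Usage sketch (illustrative only): pack a header requesting a synchronous
 * migration of the range [va_start, va_end) to GPU VRAM, then issue the bind.
 * "fd", "va_start" and "va_end" are assumed to be provided by the caller.
 *
 *	struct drm_nouveau_svm_bind bind = { 0 };
 *
 *	bind.header = (NOUVEAU_SVM_BIND_COMMAND__MIGRATE <<
 *		       NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
 *		      ((__u64)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM <<
 *		       NOUVEAU_SVM_BIND_TARGET_SHIFT);
 *	bind.va_start = va_start;
 *	bind.va_end = va_end;
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_SVM_BIND, &bind);
 *
 * On return, bind.result reports how many pages were migrated.
 */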

#define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)

#define DRM_IOCTL_NOUVEAU_SVM_INIT           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
#define DRM_IOCTL_NOUVEAU_SVM_BIND           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)

#define DRM_IOCTL_NOUVEAU_GEM_NEW            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF        DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)

#define DRM_IOCTL_NOUVEAU_VM_INIT            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
#define DRM_IOCTL_NOUVEAU_VM_BIND            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
#define DRM_IOCTL_NOUVEAU_EXEC               DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
#if defined(__cplusplus)
}
#endif

#endif /* __NOUVEAU_DRM_H__ */