source: vbox/trunk/src/VBox/Additions/x11/x11include/libdrm-2.4.5/drm.h@ 17232

Last change on this file: r17232, checked in by vboxsync, 16 years ago:

Additions/x11/x11include: added header files needed for DRI support in vboxvideo

1/**
2 * \file drm.h
3 * Header for the Direct Rendering Manager
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 *
7 * \par Acknowledgments:
8 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
9 */
10
11/*
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All rights reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36/**
37 * \mainpage
38 *
39 * The Direct Rendering Manager (DRM) is a device-independent kernel-level
40 * device driver that provides support for the XFree86 Direct Rendering
41 * Infrastructure (DRI).
42 *
43 * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
44 * ways:
45 * -# The DRM provides synchronized access to the graphics hardware via
46 * the use of an optimized two-tiered lock.
47 * -# The DRM enforces the DRI security policy for access to the graphics
48 * hardware by only allowing authenticated X11 clients access to
49 * restricted regions of memory.
50 * -# The DRM provides a generic DMA engine, complete with multiple
51 * queues and the ability to detect the need for an OpenGL context
52 * switch.
53 * -# The DRM is extensible via the use of small device-specific modules
54 * that rely extensively on the API exported by the DRM module.
55 *
56 */
57
58#ifndef _DRM_H_
59#define _DRM_H_
60
61#ifndef __user
62#define __user
63#endif
64#ifndef __iomem
65#define __iomem
66#endif
67
68#ifdef __GNUC__
69# define DEPRECATED __attribute__ ((deprecated))
70#else
71# define DEPRECATED
72#endif
73
74#if defined(__linux__)
75#include <asm/ioctl.h> /* For _IO* macros */
76#define DRM_IOCTL_NR(n) _IOC_NR(n)
77#define DRM_IOC_VOID _IOC_NONE
78#define DRM_IOC_READ _IOC_READ
79#define DRM_IOC_WRITE _IOC_WRITE
80#define DRM_IOC_READWRITE (_IOC_READ|_IOC_WRITE)
81#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
82#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
83#include <sys/ioccom.h>
84#define DRM_IOCTL_NR(n) ((n) & 0xff)
85#define DRM_IOC_VOID IOC_VOID
86#define DRM_IOC_READ IOC_OUT
87#define DRM_IOC_WRITE IOC_IN
88#define DRM_IOC_READWRITE IOC_INOUT
89#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
90#endif
91
92#ifdef __OpenBSD__
93#define DRM_MAJOR 81
94#endif
95#if defined(__linux__) || defined(__NetBSD__)
96#define DRM_MAJOR 226
97#endif
98#define DRM_MAX_MINOR 15
99
100#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
101#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
102#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
103#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
104
105#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
106#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
107#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
108#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
109#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
110
111#if defined(__linux__)
112typedef unsigned int drm_handle_t;
113#else
114#include <sys/types.h>
115typedef unsigned long drm_handle_t; /**< To mapped regions */
116#endif
117typedef unsigned int drm_context_t; /**< GLXContext handle */
118typedef unsigned int drm_drawable_t;
119typedef unsigned int drm_magic_t; /**< Magic for authentication */
120
121/**
122 * Cliprect.
123 *
124 * \warning If you change this structure, make sure you change
125 * XF86DRIClipRectRec in the server as well
126 *
127 * \note KW: Actually it's illegal to change either for
128 * backwards-compatibility reasons.
129 */
130struct drm_clip_rect {
131 unsigned short x1;
132 unsigned short y1;
133 unsigned short x2;
134 unsigned short y2;
135};
136
137/**
138 * Texture region.
139 */
140struct drm_tex_region {
141 unsigned char next;
142 unsigned char prev;
143 unsigned char in_use;
144 unsigned char padding;
145 unsigned int age;
146};
147
148/**
149 * Hardware lock.
150 *
151 * The lock structure is a simple cache-line aligned integer. To avoid
152 * processor bus contention on a multiprocessor system, there should not be any
153 * other data stored in the same cache line.
154 */
155struct drm_hw_lock {
156 __volatile__ unsigned int lock; /**< lock variable */
157 char padding[60]; /**< Pad to cache line */
158};
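
/*
 * The lock word packs the holding context in its low bits and the two status
 * bits (_DRM_LOCK_HELD, _DRM_LOCK_CONT) defined above in its high bits.  A
 * minimal sketch of decoding a value read from a mapped lock; the "hw_lock"
 * pointer is an assumption standing for a struct drm_hw_lock shared with the
 * kernel (e.g. in the SAREA):
 *
 * \code
 *      unsigned int owner = 0;
 *      unsigned int lockval = hw_lock->lock;
 *
 *      if (_DRM_LOCK_IS_HELD(lockval))
 *          owner = _DRM_LOCKING_CONTEXT(lockval);   // context holding the lock
 * \endcode
 */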
159
160/* This is beyond ugly, and only works on GCC. However, it allows me to use
161 * drm.h in places (i.e., in the X-server) where I can't use size_t. The real
162 * fix is to use uint32_t instead of size_t, but that fix will break existing
163 * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems. That *will*
164 * eventually happen, though. I chose 'unsigned long' to be the fallback type
165 * because that works on all the platforms I know about. Hopefully, the
166 * real fix will happen before that bites us.
167 */
168
169#ifdef __SIZE_TYPE__
170# define DRM_SIZE_T __SIZE_TYPE__
171#else
172# warning "__SIZE_TYPE__ not defined. Assuming sizeof(size_t) == sizeof(unsigned long)!"
173# define DRM_SIZE_T unsigned long
174#endif
175
176/**
177 * DRM_IOCTL_VERSION ioctl argument type.
178 *
179 * \sa drmGetVersion().
180 */
181struct drm_version {
182 int version_major; /**< Major version */
183 int version_minor; /**< Minor version */
184 int version_patchlevel; /**< Patch level */
185 DRM_SIZE_T name_len; /**< Length of name buffer */
186 char __user *name; /**< Name of driver */
187 DRM_SIZE_T date_len; /**< Length of date buffer */
188 char __user *date; /**< User-space buffer to hold date */
189 DRM_SIZE_T desc_len; /**< Length of desc buffer */
190 char __user *desc; /**< User-space buffer to hold desc */
191};
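
/*
 * Example: the usual two-pass query for this ioctl.  The first call, with the
 * length fields zeroed, only reports the required buffer sizes; the second
 * call, with user buffers attached, copies the strings (the kernel does not
 * NUL-terminate them, so the caller terminates afterwards).  A minimal sketch
 * assuming an already-open DRM file descriptor "fd", the usual <sys/ioctl.h>,
 * <string.h> and <stdlib.h> includes, and no error handling:
 *
 * \code
 *      struct drm_version v;
 *
 *      memset(&v, 0, sizeof(v));
 *      ioctl(fd, DRM_IOCTL_VERSION, &v);        // fills the *_len fields only
 *
 *      v.name = malloc(v.name_len + 1);
 *      v.date = malloc(v.date_len + 1);
 *      v.desc = malloc(v.desc_len + 1);
 *      ioctl(fd, DRM_IOCTL_VERSION, &v);        // copies name/date/desc
 * \endcode
 */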
192
193/**
194 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
195 *
196 * \sa drmGetBusid() and drmSetBusId().
197 */
198struct drm_unique {
199 DRM_SIZE_T unique_len; /**< Length of unique */
200 char __user *unique; /**< Unique name for driver instantiation */
201};
202
203#undef DRM_SIZE_T
204
205struct drm_list {
206 int count; /**< Length of user-space structures */
207 struct drm_version __user *version;
208};
209
210struct drm_block {
211 int unused;
212};
213
214/**
215 * DRM_IOCTL_CONTROL ioctl argument type.
216 *
217 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
218 */
219struct drm_control {
220 enum {
221 DRM_ADD_COMMAND,
222 DRM_RM_COMMAND,
223 DRM_INST_HANDLER,
224 DRM_UNINST_HANDLER
225 } func;
226 int irq;
227};
228
229/**
230 * Type of memory to map.
231 */
232enum drm_map_type {
233 _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
234 _DRM_REGISTERS = 1, /**< no caching, no core dump */
235 _DRM_SHM = 2, /**< shared, cached */
236 _DRM_AGP = 3, /**< AGP/GART */
237 _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
238 _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
239 _DRM_GEM = 6,
240 _DRM_TTM = 7,
241};
242
243/**
244 * Memory mapping flags.
245 */
246enum drm_map_flags {
247 _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
248 _DRM_READ_ONLY = 0x02,
249 _DRM_LOCKED = 0x04, /**< shared, cached, locked */
250 _DRM_KERNEL = 0x08, /**< kernel requires access */
251 _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
252 _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
253 _DRM_REMOVABLE = 0x40, /**< Removable mapping */
254 _DRM_DRIVER = 0x80 /**< Managed by driver */
255};
256
257struct drm_ctx_priv_map {
258 unsigned int ctx_id; /**< Context requesting private mapping */
259 void *handle; /**< Handle of map */
260};
261
262/**
263 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
264 * argument type.
265 *
266 * \sa drmAddMap().
267 */
268struct drm_map {
269 unsigned long offset; /**< Requested physical address (0 for SAREA)*/
270 unsigned long size; /**< Requested physical size (bytes) */
271 enum drm_map_type type; /**< Type of memory to map */
272 enum drm_map_flags flags; /**< Flags */
273 void *handle; /**< User-space: "Handle" to pass to mmap() */
274 /**< Kernel-space: kernel-virtual address */
275 int mtrr; /**< MTRR slot used */
276 /* Private data */
277};
278
279/**
280 * DRM_IOCTL_GET_CLIENT ioctl argument type.
281 */
282struct drm_client {
283 int idx; /**< Which client desired? */
284 int auth; /**< Is client authenticated? */
285 unsigned long pid; /**< Process ID */
286 unsigned long uid; /**< User ID */
287 unsigned long magic; /**< Magic */
288 unsigned long iocs; /**< Ioctl count */
289};
290
291enum drm_stat_type {
292 _DRM_STAT_LOCK,
293 _DRM_STAT_OPENS,
294 _DRM_STAT_CLOSES,
295 _DRM_STAT_IOCTLS,
296 _DRM_STAT_LOCKS,
297 _DRM_STAT_UNLOCKS,
298 _DRM_STAT_VALUE, /**< Generic value */
299 _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
300 _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
301
302 _DRM_STAT_IRQ, /**< IRQ */
303 _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
304 _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
305 _DRM_STAT_DMA, /**< DMA */
306 _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
307 _DRM_STAT_MISSED /**< Missed DMA opportunity */
308 /* Add to the *END* of the list */
309};
310
311/**
312 * DRM_IOCTL_GET_STATS ioctl argument type.
313 */
314struct drm_stats {
315 unsigned long count;
316 struct {
317 unsigned long value;
318 enum drm_stat_type type;
319 } data[15];
320};
321
322/**
323 * Hardware locking flags.
324 */
325enum drm_lock_flags {
326 _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
327 _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
328 _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
329 _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
330 /* These *HALT* flags aren't supported yet
331 -- they will be used to support the
332 full-screen DGA-like mode. */
333 _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
334 _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
335};
336
337/**
338 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
339 *
340 * \sa drmGetLock() and drmUnlock().
341 */
342struct drm_lock {
343 int context;
344 enum drm_lock_flags flags;
345};
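
/*
 * Sketch of the heavyweight ioctl path for taking and releasing the hardware
 * lock; in practice clients normally try the user-space lock word in the
 * SAREA first and only fall back to the ioctl on contention.  "fd" is an
 * open DRM file descriptor and "ctx" a context handle obtained from
 * DRM_IOCTL_ADD_CTX; error handling is omitted:
 *
 * \code
 *      struct drm_lock lk;
 *
 *      lk.context = ctx;
 *      lk.flags   = _DRM_LOCK_READY;
 *      ioctl(fd, DRM_IOCTL_LOCK, &lk);     // may block until the lock is free
 *      // ... access the hardware ...
 *      ioctl(fd, DRM_IOCTL_UNLOCK, &lk);
 * \endcode
 */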
346
347/**
348 * DMA flags
349 *
350 * \warning
351 * These values \e must match xf86drm.h.
352 *
353 * \sa drm_dma.
354 */
355enum drm_dma_flags {
356 /* Flags for DMA buffer dispatch */
357 _DRM_DMA_BLOCK = 0x01, /**<
358 * Block until buffer dispatched.
359 *
360 * \note The buffer may not yet have
361 * been processed by the hardware --
362 * getting a hardware lock with the
363 * hardware quiescent will ensure
364 * that the buffer has been
365 * processed.
366 */
367 _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
368 _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
369
370 /* Flags for DMA buffer request */
371 _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
372 _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
373 _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
374};
375
376/**
377 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
378 *
379 * \sa drmAddBufs().
380 */
381struct drm_buf_desc {
382 int count; /**< Number of buffers of this size */
383 int size; /**< Size in bytes */
384 int low_mark; /**< Low water mark */
385 int high_mark; /**< High water mark */
386 enum {
387 _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
388 _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
389 _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
390 _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
391 _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
392 } flags;
393 unsigned long agp_start; /**<
394 * Start address of where the AGP buffers are
395 * in the AGP aperture
396 */
397};
398
399/**
400 * DRM_IOCTL_INFO_BUFS ioctl argument type.
401 */
402struct drm_buf_info {
403 int count; /**< Number of buffers described in list */
404 struct drm_buf_desc __user *list; /**< List of buffer descriptions */
405};
406
407/**
408 * DRM_IOCTL_FREE_BUFS ioctl argument type.
409 */
410struct drm_buf_free {
411 int count;
412 int __user *list;
413};
414
415/**
416 * Buffer information
417 *
418 * \sa drm_buf_map.
419 */
420struct drm_buf_pub {
421 int idx; /**< Index into the master buffer list */
422 int total; /**< Buffer size */
423 int used; /**< Amount of buffer in use (for DMA) */
424 void __user *address; /**< Address of buffer */
425};
426
427/**
428 * DRM_IOCTL_MAP_BUFS ioctl argument type.
429 */
430struct drm_buf_map {
431 int count; /**< Length of the buffer list */
432#if defined(__cplusplus)
433 void __user *c_virtual;
434#else
435 void __user *virtual; /**< Mmap'd area in user-virtual */
436#endif
437 struct drm_buf_pub __user *list; /**< Buffer information */
438};
439
440/**
441 * DRM_IOCTL_DMA ioctl argument type.
442 *
443 * Indices here refer to the offset into the buffer list in drm_buf_get.
444 *
445 * \sa drmDMA().
446 */
447struct drm_dma {
448 int context; /**< Context handle */
449 int send_count; /**< Number of buffers to send */
450 int __user *send_indices; /**< List of handles to buffers */
451 int __user *send_sizes; /**< Lengths of data to send */
452 enum drm_dma_flags flags; /**< Flags */
453 int request_count; /**< Number of buffers requested */
454 int request_size; /**< Desired size for buffers */
455 int __user *request_indices; /**< Buffer information */
456 int __user *request_sizes;
457 int granted_count; /**< Number of buffers granted */
458};
459
460enum drm_ctx_flags {
461 _DRM_CONTEXT_PRESERVED = 0x01,
462 _DRM_CONTEXT_2DONLY = 0x02
463};
464
465/**
466 * DRM_IOCTL_ADD_CTX ioctl argument type.
467 *
468 * \sa drmCreateContext() and drmDestroyContext().
469 */
470struct drm_ctx {
471 drm_context_t handle;
472 enum drm_ctx_flags flags;
473};
474
475/**
476 * DRM_IOCTL_RES_CTX ioctl argument type.
477 */
478struct drm_ctx_res {
479 int count;
480 struct drm_ctx __user *contexts;
481};
482
483/**
484 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
485 */
486struct drm_draw {
487 drm_drawable_t handle;
488};
489
490/**
491 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
492 */
493typedef enum {
494 DRM_DRAWABLE_CLIPRECTS,
495} drm_drawable_info_type_t;
496
497struct drm_update_draw {
498 drm_drawable_t handle;
499 unsigned int type;
500 unsigned int num;
501 unsigned long long data;
502};
503
504/**
505 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
506 */
507struct drm_auth {
508 drm_magic_t magic;
509};
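
/*
 * Illustrative authentication handshake built on the two magic ioctls defined
 * below.  "client_fd" and "master_fd" stand for already-open descriptors; how
 * the magic travels from client to master (e.g. via the X server's DRI
 * protocol) is outside the scope of this header:
 *
 * \code
 *      struct drm_auth auth;
 *
 *      ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth);   // client obtains a magic
 *      // ... client hands auth.magic to the DRM master ...
 *      ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);  // master authenticates it
 * \endcode
 */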
510
511/**
512 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
513 *
514 * \sa drmGetInterruptFromBusID().
515 */
516struct drm_irq_busid {
517 int irq; /**< IRQ number */
518 int busnum; /**< bus number */
519 int devnum; /**< device number */
520 int funcnum; /**< function number */
521};
522
523enum drm_vblank_seq_type {
524 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
525 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
526 _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
527 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
528 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
529 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
530};
531
532#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
533#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
534 _DRM_VBLANK_NEXTONMISS)
535
536struct drm_wait_vblank_request {
537 enum drm_vblank_seq_type type;
538 unsigned int sequence;
539 unsigned long signal;
540};
541
542struct drm_wait_vblank_reply {
543 enum drm_vblank_seq_type type;
544 unsigned int sequence;
545 long tval_sec;
546 long tval_usec;
547};
548
549/**
550 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
551 *
552 * \sa drmWaitVBlank().
553 */
554union drm_wait_vblank {
555 struct drm_wait_vblank_request request;
556 struct drm_wait_vblank_reply reply;
557};
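
/*
 * Sketch of waiting for the next vertical blank through this union; "fd" is
 * an open DRM file descriptor and error handling is omitted:
 *
 * \code
 *      union drm_wait_vblank vbl;
 *
 *      memset(&vbl, 0, sizeof(vbl));
 *      vbl.request.type     = _DRM_VBLANK_RELATIVE;   // relative to the current count
 *      vbl.request.sequence = 1;                      // one more vblank from now
 *      ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
 *      // vbl.reply.sequence and vbl.reply.tval_* now describe the event
 * \endcode
 */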
558
559
560#define _DRM_PRE_MODESET 1
561#define _DRM_POST_MODESET 2
562
563/**
564 * DRM_IOCTL_MODESET_CTL ioctl argument type
565 *
566 * \sa drmModesetCtl().
567 */
568struct drm_modeset_ctl {
569 uint32_t crtc;
570 uint32_t cmd;
571};
572
573/**
574 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
575 *
576 * \sa drmAgpEnable().
577 */
578struct drm_agp_mode {
579 unsigned long mode; /**< AGP mode */
580};
581
582/**
583 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
584 *
585 * \sa drmAgpAlloc() and drmAgpFree().
586 */
587struct drm_agp_buffer {
588 unsigned long size; /**< In bytes -- will round to page boundary */
589 unsigned long handle; /**< Used for binding / unbinding */
590 unsigned long type; /**< Type of memory to allocate */
591 unsigned long physical; /**< Physical used by i810 */
592};
593
594/**
595 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
596 *
597 * \sa drmAgpBind() and drmAgpUnbind().
598 */
599struct drm_agp_binding {
600 unsigned long handle; /**< From drm_agp_buffer */
601 unsigned long offset; /**< In bytes -- will round to page boundary */
602};
603
604/**
605 * DRM_IOCTL_AGP_INFO ioctl argument type.
606 *
607 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
608 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
609 * drmAgpVendorId() and drmAgpDeviceId().
610 */
611struct drm_agp_info {
612 int agp_version_major;
613 int agp_version_minor;
614 unsigned long mode;
615 unsigned long aperture_base; /**< physical address */
616 unsigned long aperture_size; /**< bytes */
617 unsigned long memory_allowed; /**< bytes */
618 unsigned long memory_used;
619
620 /** \name PCI information */
621 /*@{ */
622 unsigned short id_vendor;
623 unsigned short id_device;
624 /*@} */
625};
626
627/**
628 * DRM_IOCTL_SG_ALLOC ioctl argument type.
629 */
630struct drm_scatter_gather {
631 unsigned long size; /**< In bytes -- will round to page boundary */
632 unsigned long handle; /**< Used for mapping / unmapping */
633};
634
635/**
636 * DRM_IOCTL_SET_VERSION ioctl argument type.
637 */
638struct drm_set_version {
639 int drm_di_major;
640 int drm_di_minor;
641 int drm_dd_major;
642 int drm_dd_minor;
643};
644
645
646#define DRM_FENCE_FLAG_EMIT 0x00000001
647#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
648/**
649 * On hardware with no interrupt events for operation completion,
650 * indicates that the kernel should sleep while waiting for any blocking
651 * operation to complete rather than spinning.
652 *
653 * Has no effect otherwise.
654 */
655#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
656#define DRM_FENCE_FLAG_NO_USER 0x00000010
657
658/* Reserved for driver use */
659#define DRM_FENCE_MASK_DRIVER 0xFF000000
660
661#define DRM_FENCE_TYPE_EXE 0x00000001
662
663struct drm_fence_arg {
664 unsigned int handle;
665 unsigned int fence_class;
666 unsigned int type;
667 unsigned int flags;
668 unsigned int signaled;
669 unsigned int error;
670 unsigned int sequence;
671 unsigned int pad64;
672 uint64_t expand_pad[2]; /*Future expansion */
673};
674
675/* Buffer permissions, referring to how the GPU uses the buffers.
676 * These translate to the fence types used for the buffers.
677 * Typically a texture buffer is read, a destination buffer is write, and
678 * a command (batch) buffer is exe. The flags can be or'ed together.
679 */
680
681#define DRM_BO_FLAG_READ (1ULL << 0)
682#define DRM_BO_FLAG_WRITE (1ULL << 1)
683#define DRM_BO_FLAG_EXE (1ULL << 2)
684
685/*
686 * All of the bits related to access mode
687 */
688#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
689/*
690 * Status flags. Can be read to determine the actual state of a buffer.
691 * Can also be set in the buffer mask before validation.
692 */
693
694/*
695 * Mask: Never evict this buffer. Not even with force. This type of buffer is only
696 * available to root and must be manually removed before buffer manager shutdown
697 * or lock.
698 * Flags: Acknowledge
699 */
700#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
701
702/*
703 * Mask: Require that the buffer is placed in mappable memory when validated.
704 * If not set the buffer may or may not be in mappable memory when validated.
705 * Flags: If set, the buffer is in mappable memory.
706 */
707#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
708
709/* Mask: The buffer should be shareable with other processes.
710 * Flags: The buffer is shareable with other processes.
711 */
712#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
713
714/* Mask: If set, place the buffer in cache-coherent memory if available.
715 * If clear, never place the buffer in cache coherent memory if validated.
716 * Flags: The buffer is currently in cache-coherent memory.
717 */
718#define DRM_BO_FLAG_CACHED (1ULL << 7)
719
720/* Mask: Make sure that every time this buffer is validated,
721 * it ends up on the same location provided that the memory mask is the same.
722 * The buffer will also not be evicted when claiming space for
723 * other buffers. Basically a pinned buffer but it may be thrown out as
724 * part of buffer manager shutdown or locking.
725 * Flags: Acknowledge.
726 */
727#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
728
729/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
730 * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
731 * with unsnooped PTEs instead of snooped, by using chipset-specific cache
732 * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
733 * as the eviction to local memory (TTM unbind) on map is just a side effect
734 * to prevent aggressive cache prefetch from the GPU disturbing the cache
735 * management that the DRM is doing.
736 *
737 * Flags: Acknowledge.
738 * Buffers allocated with this flag should not be used for suballocators.
739 * This type may have issues on CPUs with over-aggressive caching; see
740 * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
741 */
742#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
743
744
745/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
746 * Flags: Acknowledge.
747 */
748#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
749
750/*
751 * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
752 * Flags: Acknowledge.
753 */
754#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
755#define DRM_BO_FLAG_TILE (1ULL << 15)
756
757/*
758 * Memory type flags that can be or'ed together in the mask, but only
759 * one appears in flags.
760 */
761
762/* System memory */
763#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
764/* Translation table memory */
765#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
766/* Vram memory */
767#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
768/* Up to the driver to define. */
769#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
770#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
771#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
772#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
773#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
774/* We can add more of these now with a 64-bit flag type */
775
776/*
777 * This is a mask covering all of the memory type flags; easier to just
778 * use a single constant than a bunch of | values. It covers
779 * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
780 */
781#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
782/*
783 * This adds all of the CPU-mapping options in with the memory
784 * type to label all bits which change how the page gets mapped
785 */
786#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
787 DRM_BO_FLAG_CACHED_MAPPED | \
788 DRM_BO_FLAG_CACHED | \
789 DRM_BO_FLAG_MAPPABLE)
790
791/* Driver-private flags */
792#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
793
794/*
795 * Don't block on validate and map. Instead, return EBUSY.
796 */
797#define DRM_BO_HINT_DONT_BLOCK 0x00000002
798/*
799 * Don't place this buffer on the unfenced list. This means
800 * that the buffer will not end up having a fence associated
801 * with it as a result of this operation
802 */
803#define DRM_BO_HINT_DONT_FENCE 0x00000004
804/**
805 * On hardware with no interrupt events for operation completion,
806 * indicates that the kernel should sleep while waiting for any blocking
807 * operation to complete rather than spinning.
808 *
809 * Has no effect otherwise.
810 */
811#define DRM_BO_HINT_WAIT_LAZY 0x00000008
812/*
813 * The client has computed relocations referring to this buffer using the
814 * offset in the presumed_offset field. If that offset ends up matching
815 * where this buffer lands, the kernel is free to skip executing those
816 * relocations.
817 */
818#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
819
820#define DRM_BO_INIT_MAGIC 0xfe769812
821#define DRM_BO_INIT_MAJOR 1
822#define DRM_BO_INIT_MINOR 0
823#define DRM_BO_INIT_PATCH 0
824
825
826struct drm_bo_info_req {
827 uint64_t mask;
828 uint64_t flags;
829 unsigned int handle;
830 unsigned int hint;
831 unsigned int fence_class;
832 unsigned int desired_tile_stride;
833 unsigned int tile_info;
834 unsigned int pad64;
835 uint64_t presumed_offset;
836};
837
838struct drm_bo_create_req {
839 uint64_t flags;
840 uint64_t size;
841 uint64_t buffer_start;
842 unsigned int hint;
843 unsigned int page_alignment;
844};
845
846
847/*
848 * Reply flags
849 */
850
851#define DRM_BO_REP_BUSY 0x00000001
852
853struct drm_bo_info_rep {
854 uint64_t flags;
855 uint64_t proposed_flags;
856 uint64_t size;
857 uint64_t offset;
858 uint64_t arg_handle;
859 uint64_t buffer_start;
860 unsigned int handle;
861 unsigned int fence_flags;
862 unsigned int rep_flags;
863 unsigned int page_alignment;
864 unsigned int desired_tile_stride;
865 unsigned int hw_tile_stride;
866 unsigned int tile_info;
867 unsigned int pad64;
868 uint64_t expand_pad[4]; /*Future expansion */
869};
870
871struct drm_bo_arg_rep {
872 struct drm_bo_info_rep bo_info;
873 int ret;
874 unsigned int pad64;
875};
876
877struct drm_bo_create_arg {
878 union {
879 struct drm_bo_create_req req;
880 struct drm_bo_info_rep rep;
881 } d;
882};
883
884struct drm_bo_handle_arg {
885 unsigned int handle;
886};
887
888struct drm_bo_reference_info_arg {
889 union {
890 struct drm_bo_handle_arg req;
891 struct drm_bo_info_rep rep;
892 } d;
893};
894
895struct drm_bo_map_wait_idle_arg {
896 union {
897 struct drm_bo_info_req req;
898 struct drm_bo_info_rep rep;
899 } d;
900};
901
902struct drm_bo_op_req {
903 enum {
904 drm_bo_validate,
905 drm_bo_fence,
906 drm_bo_ref_fence,
907 } op;
908 unsigned int arg_handle;
909 struct drm_bo_info_req bo_req;
910};
911
912
913struct drm_bo_op_arg {
914 uint64_t next;
915 union {
916 struct drm_bo_op_req req;
917 struct drm_bo_arg_rep rep;
918 } d;
919 int handled;
920 unsigned int pad64;
921};
922
923
924#define DRM_BO_MEM_LOCAL 0
925#define DRM_BO_MEM_TT 1
926#define DRM_BO_MEM_VRAM 2
927#define DRM_BO_MEM_PRIV0 3
928#define DRM_BO_MEM_PRIV1 4
929#define DRM_BO_MEM_PRIV2 5
930#define DRM_BO_MEM_PRIV3 6
931#define DRM_BO_MEM_PRIV4 7
932
933#define DRM_BO_MEM_TYPES 8 /* For now. */
934
935#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
936#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
937
938struct drm_bo_version_arg {
939 uint32_t major;
940 uint32_t minor;
941 uint32_t patchlevel;
942};
943
944struct drm_mm_type_arg {
945 unsigned int mem_type;
946 unsigned int lock_flags;
947};
948
949struct drm_mm_init_arg {
950 unsigned int magic;
951 unsigned int major;
952 unsigned int minor;
953 unsigned int mem_type;
954 uint64_t p_offset;
955 uint64_t p_size;
956};
957
958struct drm_mm_info_arg {
959 unsigned int mem_type;
960 uint64_t p_size;
961};
962
963struct drm_gem_close {
964 /** Handle of the object to be closed. */
965 uint32_t handle;
966 uint32_t pad;
967};
968
969struct drm_gem_flink {
970 /** Handle for the object being named */
971 uint32_t handle;
972
973 /** Returned global name */
974 uint32_t name;
975};
976
977struct drm_gem_open {
978 /** Name of object being opened */
979 uint32_t name;
980
981 /** Returned handle for the object */
982 uint32_t handle;
983
984 /** Returned size of the object */
985 uint64_t size;
986};
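
/*
 * Sketch of sharing a GEM object between two processes with the structures
 * above and the DRM_IOCTL_GEM_* numbers defined below.  "bo_handle", "fd_a"
 * and "fd_b" are assumptions standing for a valid GEM handle and two open
 * DRM descriptors; error handling is omitted:
 *
 * \code
 *      struct drm_gem_flink flink;
 *      struct drm_gem_open open_arg;
 *
 *      flink.handle = bo_handle;
 *      ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);      // publish a global name
 *
 *      open_arg.name = flink.name;
 *      ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_arg);    // open it in the other process
 *      // open_arg.handle and open_arg.size are now valid on fd_b
 * \endcode
 */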
987
988#include "drm_mode.h"
989
990/**
991 * \name Ioctls Definitions
992 */
993/*@{*/
994
995#define DRM_IOCTL_BASE 'd'
996#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
997#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
998#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
999#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
1000
1001#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
1002#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
1003#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
1004#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
1005#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
1006#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
1007#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
1008#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
1009#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
1010
1011#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
1012#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
1013#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
1014
1015#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
1016#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
1017#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
1018#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
1019#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
1020#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
1021#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
1022#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
1023#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
1024#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
1025#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
1026
1027#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
1028
1029#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
1030#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
1031
1032#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
1033#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
1034
1035#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
1036#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
1037#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
1038#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
1039#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
1040#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
1041#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
1042#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
1043#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
1044#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
1045#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
1046#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
1047#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
1048
1049#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
1050#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
1051#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
1052#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
1053#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
1054#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
1055#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
1056#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
1057
1058#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
1059#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
1060
1061#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
1062
1063#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
1064
1065#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
1066#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
1067#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
1068#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
1069
1070#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
1071#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
1072#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
1073#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
1074#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
1075#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
1076#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
1077#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
1078
1079#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
1080#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
1081#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
1082#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
1083#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
1084#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
1085#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
1086#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
1087#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
1088#define DRM_IOCTL_MM_INFO DRM_IOWR(0xd7, struct drm_mm_info_arg)
1089
1090#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
1091
1092#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
1093#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
1094#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
1095#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
1096#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
1097
1098#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
1099
1100#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
1101#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
1102#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
1103#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
1104#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
1105#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
1106
1107#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
1108#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
1109#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, uint32_t)
1110#define DRM_IOCTL_MODE_REPLACEFB DRM_IOWR(0xB0, struct drm_mode_fb_cmd)
1111
1112/*@}*/
1113
1114/**
1115 * Device-specific ioctls should only be in their respective headers.
1116 * The device-specific ioctl range is from 0x40 to 0x99.
1117 * Generic ioctls restart at 0xA0.
1118 *
1119 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
1120 * drmCommandReadWrite().
1121 */
1122#define DRM_COMMAND_BASE 0x40
1123#define DRM_COMMAND_END 0xA0
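
/*
 * A driver header typically composes its private ioctl numbers on top of this
 * range.  A hypothetical example (the "MYDRV" names and payload type are
 * placeholders, not part of any real driver):
 *
 * \code
 *      #define DRM_MYDRV_DO_THING        0x00
 *      #define DRM_IOCTL_MYDRV_DO_THING  DRM_IOWR(DRM_COMMAND_BASE + DRM_MYDRV_DO_THING, \
 *                                                 struct drm_mydrv_do_thing)
 * \endcode
 */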
1124
1125/* typedef area */
1126#ifndef __KERNEL__
1127typedef struct drm_clip_rect drm_clip_rect_t;
1128typedef struct drm_tex_region drm_tex_region_t;
1129typedef struct drm_hw_lock drm_hw_lock_t;
1130typedef struct drm_version drm_version_t;
1131typedef struct drm_unique drm_unique_t;
1132typedef struct drm_list drm_list_t;
1133typedef struct drm_block drm_block_t;
1134typedef struct drm_control drm_control_t;
1135typedef enum drm_map_type drm_map_type_t;
1136typedef enum drm_map_flags drm_map_flags_t;
1137typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
1138typedef struct drm_map drm_map_t;
1139typedef struct drm_client drm_client_t;
1140typedef enum drm_stat_type drm_stat_type_t;
1141typedef struct drm_stats drm_stats_t;
1142typedef enum drm_lock_flags drm_lock_flags_t;
1143typedef struct drm_lock drm_lock_t;
1144typedef enum drm_dma_flags drm_dma_flags_t;
1145typedef struct drm_buf_desc drm_buf_desc_t;
1146typedef struct drm_buf_info drm_buf_info_t;
1147typedef struct drm_buf_free drm_buf_free_t;
1148typedef struct drm_buf_pub drm_buf_pub_t;
1149typedef struct drm_buf_map drm_buf_map_t;
1150typedef struct drm_dma drm_dma_t;
1151typedef union drm_wait_vblank drm_wait_vblank_t;
1152typedef struct drm_agp_mode drm_agp_mode_t;
1153typedef enum drm_ctx_flags drm_ctx_flags_t;
1154typedef struct drm_ctx drm_ctx_t;
1155typedef struct drm_ctx_res drm_ctx_res_t;
1156typedef struct drm_draw drm_draw_t;
1157typedef struct drm_update_draw drm_update_draw_t;
1158typedef struct drm_auth drm_auth_t;
1159typedef struct drm_irq_busid drm_irq_busid_t;
1160typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
1161typedef struct drm_agp_buffer drm_agp_buffer_t;
1162typedef struct drm_agp_binding drm_agp_binding_t;
1163typedef struct drm_agp_info drm_agp_info_t;
1164typedef struct drm_scatter_gather drm_scatter_gather_t;
1165typedef struct drm_set_version drm_set_version_t;
1166
1167typedef struct drm_fence_arg drm_fence_arg_t;
1168typedef struct drm_mm_type_arg drm_mm_type_arg_t;
1169typedef struct drm_mm_init_arg drm_mm_init_arg_t;
1170typedef enum drm_bo_type drm_bo_type_t;
1171#endif
1172
1173#endif