VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_main.c@79449

Last change on this file since 79449 was 79025, checked in by vboxsync, 6 years ago

Linux/host and guest drivers: support more openSUSE 15.0 and 15.1 kernels.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.7 KB
/* $Id: vbox_main.c 79025 2019-06-06 14:33:47Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2019 Oracle Corporation
 * This file is based on ast_main.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>,
 *          Michael Thayer <michael.thayer@oracle.com>,
 *          Hans de Goede <hdegoede@redhat.com>
 */
#include "vbox_drv.h"
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include <VBoxVideoGuest.h>
#include <VBoxVideoVBE.h>

#include "hgsmi_channels.h"

static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
        struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);

        if (vbox_fb->obj)
                drm_gem_object_put_unlocked(vbox_fb->obj);

        drm_framebuffer_cleanup(fb);
        kfree(fb);
}

void vbox_enable_accel(struct vbox_private *vbox)
{
        unsigned int i;
        struct VBVABUFFER *vbva;

        if (!vbox->vbva_info || !vbox->vbva_buffers) {
                /* Should never happen... */
                DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
                return;
        }

        for (i = 0; i < vbox->num_crtcs; ++i) {
                if (vbox->vbva_info[i].pVBVA)
                        continue;

                vbva = (void __force *)vbox->vbva_buffers +
                       i * VBVA_MIN_BUFFER_SIZE;
                if (!VBoxVBVAEnable(&vbox->vbva_info[i],
                                    vbox->guest_pool, vbva, i)) {
                        /* very old host or driver error. */
                        DRM_ERROR("vboxvideo: vbva_enable failed\n");
                        return;
                }
        }
}

void vbox_disable_accel(struct vbox_private *vbox)
{
        unsigned int i;

        for (i = 0; i < vbox->num_crtcs; ++i)
                VBoxVBVADisable(&vbox->vbva_info[i], vbox->guest_pool, i);
}

void vbox_report_caps(struct vbox_private *vbox)
{
        u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
                   VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;

        if (vbox->initial_mode_queried)
                caps |= VBVACAPS_VIDEO_MODE_HINTS;

        VBoxHGSMISendCapsInfo(vbox->guest_pool, caps);
}

/**
 * Send information about dirty rectangles to VBVA. If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?)
 */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
                                       struct drm_clip_rect *rects,
                                       unsigned int num_rects)
{
        struct vbox_private *vbox = fb->dev->dev_private;
        struct drm_crtc *crtc;
        unsigned int i;

        /* The user can send rectangles, we do not need the timer. */
        vbox->need_refresh_timer = false;
        mutex_lock(&vbox->hw_mutex);
        list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
                if (CRTC_FB(crtc) != fb)
                        continue;

                for (i = 0; i < num_rects; ++i) {
                        VBVACMDHDR cmd_hdr;
                        unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;

                        if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
                            (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
                            (rects[i].x2 < crtc->x) ||
                            (rects[i].y2 < crtc->y))
                                continue;

                        cmd_hdr.x = (s16)rects[i].x1;
                        cmd_hdr.y = (s16)rects[i].y1;
                        cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
                        cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;

                        if (!VBoxVBVABufferBeginUpdate(&vbox->vbva_info[crtc_id],
                                                       vbox->guest_pool))
                                continue;

                        VBoxVBVAWrite(&vbox->vbva_info[crtc_id], vbox->guest_pool,
                                      &cmd_hdr, sizeof(cmd_hdr));
                        VBoxVBVABufferEndUpdate(&vbox->vbva_info[crtc_id]);
                }
        }
        mutex_unlock(&vbox->hw_mutex);
}
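
/*
 * Illustrative sketch, not part of the driver: a user-space client
 * typically reaches the handler above through the standard DRM
 * dirty-framebuffer ioctl, roughly like this:
 *
 *      struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
 *      struct drm_mode_fb_dirty_cmd cmd = {
 *              .fb_id = fb_id,         /- handle from DRM_IOCTL_MODE_ADDFB2 -/
 *              .num_clips = 1,
 *              .clips_ptr = (uintptr_t)&clip,
 *      };
 *      ioctl(drm_fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
 *
 * The DRM core routes that to the framebuffer's .dirty hook, which is
 * wired to vbox_user_framebuffer_dirty() below.
 */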

static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                       struct drm_file *file_priv,
                                       unsigned int flags, unsigned int color,
                                       struct drm_clip_rect *rects,
                                       unsigned int num_rects)
{
        vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

        return 0;
}

static const struct drm_framebuffer_funcs vbox_fb_funcs = {
        .destroy = vbox_user_framebuffer_destroy,
        .dirty = vbox_user_framebuffer_dirty,
};

int vbox_framebuffer_init(struct drm_device *dev,
                          struct vbox_framebuffer *vbox_fb,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) || defined(RHEL_73)
                          const struct DRM_MODE_FB_CMD *mode_cmd,
#else
                          struct DRM_MODE_FB_CMD *mode_cmd,
#endif
                          struct drm_gem_object *obj)
{
        int ret;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
        drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
#else
        drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
#endif
        vbox_fb->obj = obj;
        ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                return ret;
        }

        return 0;
}

static struct drm_framebuffer *vbox_user_framebuffer_create(
                struct drm_device *dev,
                struct drm_file *filp,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) || defined(RHEL_73)
                const struct drm_mode_fb_cmd2 *mode_cmd)
#else
                struct drm_mode_fb_cmd2 *mode_cmd)
#endif
{
        struct drm_gem_object *obj;
        struct vbox_framebuffer *vbox_fb;
        int ret = -ENOMEM;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || defined(RHEL_74)
        obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
#else
        obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
#endif
        if (!obj)
                return ERR_PTR(-ENOENT);

        vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
        if (!vbox_fb)
                goto err_unref_obj;

        ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
        if (ret)
                goto err_free_vbox_fb;

        return &vbox_fb->base;

err_free_vbox_fb:
        kfree(vbox_fb);
err_unref_obj:
        drm_gem_object_put_unlocked(obj);
        return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs vbox_mode_funcs = {
        .fb_create = vbox_user_framebuffer_create,
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) && !defined(RHEL_73)
#define pci_iomap_range(dev, bar, offset, maxlen) \
        ioremap(pci_resource_start(dev, bar) + (offset), maxlen)
#endif
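
/*
 * (Explanatory note, not from the original source: the real
 * pci_iomap_range(), added in Linux 4.0 as the version guard above
 * indicates, also validates the BAR's length and copes with I/O-port
 * BARs.  The ioremap() fallback assumes an in-range offset into a
 * memory BAR, which holds for the callers in this file, all of which
 * map parts of the VirtualBox VGA device's VRAM BAR 0.)
 */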

/**
 * Tell the host about the views. This design originally targeted the
 * Windows XP driver architecture and assumed that each screen would
 * have a dedicated frame buffer with the command buffer following it,
 * the whole being a "view". The host works out which screen a command
 * buffer belongs to by checking whether it is in the first view, then
 * whether it is in the second and so on. The first match wins. We
 * cheat around this by making the first view be the managed memory
 * plus the first command buffer, the second the same plus the second
 * buffer and so on.
 */
static int vbox_set_views(struct vbox_private *vbox)
{
        VBVAINFOVIEW *p;
        int i;

        p = VBoxHGSMIBufferAlloc(vbox->guest_pool, sizeof(*p),
                                 HGSMI_CH_VBVA, VBVA_INFO_VIEW);
        if (!p)
                return -ENOMEM;

        for (i = 0; i < vbox->num_crtcs; ++i) {
                p->u32ViewIndex = i;
                p->u32ViewOffset = 0;
                p->u32ViewSize = vbox->available_vram_size +
                                 i * VBVA_MIN_BUFFER_SIZE;
                p->u32MaxScreenSize = vbox->available_vram_size;

                VBoxHGSMIBufferSubmit(vbox->guest_pool, p);
        }

        VBoxHGSMIBufferFree(vbox->guest_pool, p);

        return 0;
}
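
/*
 * (Worked example of the scheme described above, with illustrative
 * numbers only: say available_vram_size is 64 MiB and
 * VBVA_MIN_BUFFER_SIZE is 64 KiB.  Command buffer i then starts at
 * 64 MiB + i * 64 KiB, every view starts at offset 0, and each
 * successive view is one command buffer larger than the last, so the
 * first view large enough to contain a given command buffer
 * identifies its screen -- the "first match wins" rule from the
 * comment above.)
 */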

static int vbox_accel_init(struct vbox_private *vbox)
{
        unsigned int i;
        int ret;

        vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
                                       sizeof(*vbox->vbva_info), GFP_KERNEL);
        if (!vbox->vbva_info)
                return -ENOMEM;

        /* Take a command buffer for each screen from the end of usable VRAM. */
        vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

        vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
                                             vbox->available_vram_size,
                                             vbox->num_crtcs *
                                             VBVA_MIN_BUFFER_SIZE);
        if (!vbox->vbva_buffers)
                return -ENOMEM;

        for (i = 0; i < vbox->num_crtcs; ++i)
                VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
                                           vbox->available_vram_size +
                                           i * VBVA_MIN_BUFFER_SIZE,
                                           VBVA_MIN_BUFFER_SIZE);

        vbox_enable_accel(vbox);
        ret = vbox_set_views(vbox);
        if (ret)
                goto err_pci_iounmap;

        return 0;

err_pci_iounmap:
        pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
        return ret;
}
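
/*
 * (Explanatory note, not from the original source: after
 * vbox_accel_init() the VRAM BAR 0 is laid out roughly as
 *
 *      0 .. available_vram_size                framebuffer memory
 *      .. + num_crtcs * VBVA_MIN_BUFFER_SIZE   per-CRTC VBVA command buffers
 *      GUEST_HEAP_OFFSET() .. end              HGSMI guest heap
 *
 * available_vram_size starts out at GUEST_HEAP_OFFSET() -- see
 * vbox_hw_init() below -- and is reduced here by the space taken for
 * the command buffers.)
 */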

static void vbox_accel_fini(struct vbox_private *vbox)
{
        vbox_disable_accel(vbox);
        pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
}

/** Do we support the 4.3 plus mode hint reporting interface? */
static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
        u32 have_hints, have_cursor;
        int ret;

        ret = VBoxQueryConfHGSMI(vbox->guest_pool,
                                 VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
                                 &have_hints);
        if (ret)
                return false;

        ret = VBoxQueryConfHGSMI(vbox->guest_pool,
                                 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
                                 &have_cursor);
        if (ret)
                return false;

        return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}
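
/*
 * (Note, an inference from the checks above rather than documented
 * fact: the host appears to report these two CONF32 values as VBox
 * status codes, so support is signalled by the value VINF_SUCCESS
 * rather than by a non-zero boolean.)
 */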

/**
 * Our refresh timer call-back. Only used for guests without dirty rectangle
 * support.
 */
static void vbox_refresh_timer(struct work_struct *work)
{
        struct vbox_private *vbox = container_of(work, struct vbox_private,
                                                 refresh_work.work);
        bool have_unblanked = false;
        struct drm_crtc *crtci;

        if (!vbox->need_refresh_timer)
                return;
        list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
                struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtci);

                if (crtci->enabled && !vbox_crtc->blanked)
                        have_unblanked = true;
        }
        if (!have_unblanked)
                return;
        /* This forces a full refresh. */
        vbox_enable_accel(vbox);
        /* Schedule the next timer iteration. */
        schedule_delayed_work(&vbox->refresh_work, VBOX_REFRESH_PERIOD);
}
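
/*
 * (Explanatory note: the work item above re-arms itself only while
 * need_refresh_timer is set and at least one CRTC is enabled and
 * unblanked; in any other case it returns without calling
 * schedule_delayed_work(), so the code that sets need_refresh_timer
 * again -- elsewhere in the driver -- must also queue the work item.)
 */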

static bool vbox_check_supported(u16 id)
{
        u16 dispi_id;

        vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
        dispi_id = inw(VBE_DISPI_IOPORT_DATA);

        return dispi_id == id;
}
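
/*
 * (Explanatory note, not from the original source: this is the usual
 * bochs/VBE "DISPI" handshake -- write the interface version you want
 * to the ID register and read it back; if the value is echoed
 * unchanged, the device supports that version.)
 */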

/**
 * Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 */
static int vbox_hw_init(struct vbox_private *vbox)
{
        int ret = -ENOMEM;

        vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
        vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);

        DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

        /* Map the guest heap at the end of VRAM. */
        vbox->guest_heap =
                pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_SIZE);
        if (!vbox->guest_heap)
                return -ENOMEM;

        /* Create the guest-heap memory pool, using 2^4 = 16 byte chunks. */
        vbox->guest_pool = gen_pool_create(4, -1);
        if (!vbox->guest_pool)
                goto err_unmap_guest_heap;

        ret = gen_pool_add_virt(vbox->guest_pool,
                                (unsigned long)vbox->guest_heap,
                                GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_USABLE_SIZE, -1);
        if (ret)
                goto err_destroy_guest_pool;

        /* Reduce available VRAM size to reflect the guest heap. */
        vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
        /* Linux drm represents monitors as a 32-bit array. */
        VBoxQueryConfHGSMI(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
                           &vbox->num_crtcs);
        vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);

        if (!have_hgsmi_mode_hints(vbox)) {
                ret = -ENOTSUPP;
                goto err_destroy_guest_pool;
        }

        vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
                                             sizeof(VBVAMODEHINT),
                                             GFP_KERNEL);
        if (!vbox->last_mode_hints) {
                ret = -ENOMEM;
                goto err_destroy_guest_pool;
        }

        ret = vbox_accel_init(vbox);
        if (ret)
                goto err_destroy_guest_pool;

        /* Set up the refresh timer for users which do not send dirty rectangles. */
        INIT_DELAYED_WORK(&vbox->refresh_work, vbox_refresh_timer);

        return 0;

err_destroy_guest_pool:
        gen_pool_destroy(vbox->guest_pool);
err_unmap_guest_heap:
        pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
        return ret;
}
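
/*
 * Aside on the genalloc usage above (a sketch, not part of the
 * driver): gen_pool_create(4, -1) makes a pool with 16-byte (2^4)
 * allocation granularity on any NUMA node, and gen_pool_add_virt()
 * seeds it with the ioremapped guest heap, recording the heap's VRAM
 * offset as the "physical" address so that allocations can be
 * translated back into offsets the host understands:
 *
 *      unsigned long va = gen_pool_alloc(pool, 128);
 *      phys_addr_t off = gen_pool_virt_to_phys(pool, va);
 *      gen_pool_free(pool, va, 128);
 */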

static void vbox_hw_fini(struct vbox_private *vbox)
{
        vbox->need_refresh_timer = false;
        cancel_delayed_work(&vbox->refresh_work);
        vbox_accel_fini(vbox);
        gen_pool_destroy(vbox->guest_pool);
        pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
#else
int vbox_driver_load(struct drm_device *dev)
#endif
{
        struct vbox_private *vbox;
        int ret = 0;

        if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
                return -ENODEV;

        vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
        if (!vbox)
                return -ENOMEM;

        dev->dev_private = vbox;
        vbox->dev = dev;

        mutex_init(&vbox->hw_mutex);

        ret = vbox_hw_init(vbox);
        if (ret)
                return ret;

        ret = vbox_mm_init(vbox);
        if (ret)
                goto err_hw_fini;

        drm_mode_config_init(dev);

        dev->mode_config.funcs = (void *)&vbox_mode_funcs;
        dev->mode_config.min_width = 64;
        dev->mode_config.min_height = 64;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
        dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

        ret = vbox_mode_init(dev);
        if (ret)
                goto err_drm_mode_cleanup;

        ret = vbox_irq_init(vbox);
        if (ret)
                goto err_mode_fini;

        ret = vbox_fbdev_init(dev);
        if (ret)
                goto err_irq_fini;

        return 0;

err_irq_fini:
        vbox_irq_fini(vbox);
err_mode_fini:
        vbox_mode_fini(dev);
err_drm_mode_cleanup:
        drm_mode_config_cleanup(dev);
        vbox_mm_fini(vbox);
err_hw_fini:
        vbox_hw_fini(vbox);
        return ret;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
void vbox_driver_unload(struct drm_device *dev)
#else
int vbox_driver_unload(struct drm_device *dev)
#endif
{
        struct vbox_private *vbox = dev->dev_private;

        vbox_fbdev_fini(dev);
        vbox_irq_fini(vbox);
        vbox_mode_fini(dev);
        drm_mode_config_cleanup(dev);
        vbox_mm_fini(vbox);
        vbox_hw_fini(vbox);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) && !defined(RHEL_75)
        return 0;
#endif
}

/**
 * @note this is described in the DRM framework documentation. AST does not
 * have it, but we get an oops on driver unload if it is not present.
 */
void vbox_driver_lastclose(struct drm_device *dev)
{
        struct vbox_private *vbox = dev->dev_private;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) || defined(RHEL_71)
        if (vbox->fbdev)
                drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
        drm_modeset_lock_all(dev);
        if (vbox->fbdev)
                drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
        drm_modeset_unlock_all(dev);
#endif
}

int vbox_gem_create(struct drm_device *dev,
                    u32 size, bool iskernel, struct drm_gem_object **obj)
{
        struct vbox_bo *vboxbo;
        int ret;

        *obj = NULL;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("failed to allocate GEM object\n");
                return ret;
        }

        *obj = &vboxbo->gem;

        return 0;
}

int vbox_dumb_create(struct drm_file *file,
                     struct drm_device *dev, struct drm_mode_create_dumb *args)
{
        int ret;
        struct drm_gem_object *gobj;
        u32 handle;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = vbox_gem_create(dev, args->size, false, &gobj);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file, gobj, &handle);
        drm_gem_object_put_unlocked(gobj);
        if (ret)
                return ret;

        args->handle = handle;

        return 0;
}
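
/*
 * (Worked example of the computation above: a 1024x768 dumb buffer at
 * 32 bpp gives pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and
 * size = 4096 * 768 = 3 MiB, which vbox_gem_create() then rounds up
 * to a whole number of pages.)
 */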

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && !defined(RHEL_73)
int vbox_dumb_destroy(struct drm_file *file,
                      struct drm_device *dev, u32 handle)
{
        return drm_gem_handle_delete(file, handle);
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) && !defined(OPENSUSE_151)
static void ttm_bo_put(struct ttm_buffer_object *bo)
{
        ttm_bo_unref(&bo);
}
#endif

void vbox_gem_free_object(struct drm_gem_object *obj)
{
        struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

        ttm_bo_put(&vbox_bo->bo);
}

static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && !defined(RHEL_70)
        return bo->bo.addr_space_offset;
#else
        return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif
}

int
vbox_dumb_mmap_offset(struct drm_file *file,
                      struct drm_device *dev,
                      u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;
        struct vbox_bo *bo;

        mutex_lock(&dev->struct_mutex);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || defined(RHEL_74)
        obj = drm_gem_object_lookup(file, handle);
#else
        obj = drm_gem_object_lookup(dev, file, handle);
#endif
        if (!obj) {
                ret = -ENOENT;
                goto out_unlock;
        }

        bo = gem_to_vbox_bo(obj);
        *offset = vbox_bo_mmap_offset(bo);

        drm_gem_object_put(obj);
        ret = 0;

out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
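
/*
 * Illustrative user-space sketch, not part of the driver: the offset
 * produced above is what a client gets back from the map-dumb ioctl
 * and then passes to mmap() on the DRM file descriptor, roughly:
 *
 *      struct drm_mode_map_dumb map = { .handle = handle };
 *      ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *      void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, drm_fd, map.offset);
 */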