VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_main.c@74820

Last change on this file since 74820 was 74779, checked in by vboxsync, 6 years ago

Additions/linux/vboxvideo: only set the views once, at initialisation time.
bugref: 9253: Enabling 2nd virtual screen results in an assert.
Views in vboxvideo/HGSMI are a concept used to tell the graphics card about
the virtual frame-buffer in use for each virtual screen. Until now we set
these prior to every mode-set operation. The assertion reported in this bug
was caused by a mismatch when enabling and disabling screens using a
mode-set for DPMS.
In fact, though, there is no good reason for the graphics hardware to know
about the frame-buffers at all. It is enough for it to know the offset in
video memory where scan-out starts, along with the size and stride. So this
change switches to setting the views only once, at driver initialisation
time, and makes each view cover the whole range of potentially usable video
memory for its screen. Thus whatever frame-buffer is created for a mode-set
will always be a subset of the view.
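(In the listing below this is implemented by vbox_set_views(), which sends one VBVAINFOVIEW per screen covering the usable VRAM range, and which is now called a single time from vbox_accel_init().)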

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.7 KB
/* $Id: vbox_main.c 74779 2018-10-12 08:05:16Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_main.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>,
 *          Michael Thayer <michael.thayer@oracle.com>,
 *          Hans de Goede <hdegoede@redhat.com>
 */
#include "vbox_drv.h"
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include <VBoxVideoGuest.h>
#include <VBoxVideoVBE.h>

#include "hgsmi_channels.h"

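/**
 * Drop the reference to the GEM object backing the frame-buffer, then
 * clean up and free the frame-buffer structure itself.
 */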
static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);

	if (vbox_fb->obj)
		drm_gem_object_unreference_unlocked(vbox_fb->obj);

	drm_framebuffer_cleanup(fb);
	kfree(fb);
}

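/**
 * Enable VBVA command submission for each CRTC which does not yet have an
 * active VBVA buffer, taking the buffers from the area reserved at the end
 * of usable VRAM by vbox_accel_init().
 */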
void vbox_enable_accel(struct vbox_private *vbox)
{
	unsigned int i;
	struct VBVABUFFER *vbva;

	if (!vbox->vbva_info || !vbox->vbva_buffers) {
		/* Should never happen... */
		DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
		return;
	}

	for (i = 0; i < vbox->num_crtcs; ++i) {
		if (vbox->vbva_info[i].pVBVA)
			continue;

		vbva = (void *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE;
		if (!VBoxVBVAEnable(&vbox->vbva_info[i],
				    vbox->guest_pool, vbva, i)) {
			/* very old host or driver error. */
			DRM_ERROR("vboxvideo: vbva_enable failed\n");
			return;
		}
	}
}

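/** Disable VBVA command submission for all CRTCs. */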
void vbox_disable_accel(struct vbox_private *vbox)
{
	unsigned int i;

	for (i = 0; i < vbox->num_crtcs; ++i)
		VBoxVBVADisable(&vbox->vbva_info[i], vbox->guest_pool, i);
}

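/**
 * Report our capabilities to the host. Video mode hints are only
 * advertised once the initial mode has been queried.
 */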
void vbox_report_caps(struct vbox_private *vbox)
{
	u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
		   VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;

	if (vbox->initial_mode_queried)
		caps |= VBVACAPS_VIDEO_MODE_HINTS;

	VBoxHGSMISendCapsInfo(vbox->guest_pool, caps);
}

/**
 * Send information about dirty rectangles to VBVA. If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?)
 */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	struct vbox_private *vbox = fb->dev->dev_private;
	struct drm_crtc *crtc;
	unsigned int i;

	/* The user can send rectangles, we do not need the timer. */
	vbox->need_refresh_timer = false;
	mutex_lock(&vbox->hw_mutex);
	list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
		if (CRTC_FB(crtc) != fb)
			continue;

		for (i = 0; i < num_rects; ++i) {
			VBVACMDHDR cmd_hdr;
			unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;

			if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
			    (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
			    (rects[i].x2 < crtc->x) ||
			    (rects[i].y2 < crtc->y))
				continue;

			cmd_hdr.x = (s16)rects[i].x1;
			cmd_hdr.y = (s16)rects[i].y1;
			cmd_hdr.w = (u16)(rects[i].x2 - rects[i].x1);
			cmd_hdr.h = (u16)(rects[i].y2 - rects[i].y1);

			if (!VBoxVBVABufferBeginUpdate(&vbox->vbva_info[crtc_id],
						       vbox->guest_pool))
				continue;

			VBoxVBVAWrite(&vbox->vbva_info[crtc_id], vbox->guest_pool,
				      &cmd_hdr, sizeof(cmd_hdr));
			VBoxVBVABufferEndUpdate(&vbox->vbva_info[crtc_id]);
		}
	}
	mutex_unlock(&vbox->hw_mutex);
}

static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       struct drm_file *file_priv,
				       unsigned int flags, unsigned int color,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

	return 0;
}

static const struct drm_framebuffer_funcs vbox_fb_funcs = {
	.destroy = vbox_user_framebuffer_destroy,
	.dirty = vbox_user_framebuffer_dirty,
};

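/**
 * Fill out the DRM frame-buffer structure from the mode command, attach the
 * backing GEM object and register the frame-buffer with DRM.
 */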
int vbox_framebuffer_init(struct drm_device *dev,
			  struct vbox_framebuffer *vbox_fb,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) || defined(RHEL_73)
			  const struct DRM_MODE_FB_CMD *mode_cmd,
#else
			  struct DRM_MODE_FB_CMD *mode_cmd,
#endif
			  struct drm_gem_object *obj)
{
	int ret;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
	drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
#else
	drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
#endif
	vbox_fb->obj = obj;
	ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}

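/**
 * .fb_create handler: look up the GEM object for the first handle in the
 * mode command, then allocate and initialise a vbox_framebuffer around it.
 */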
static struct drm_framebuffer *vbox_user_framebuffer_create(
		struct drm_device *dev,
		struct drm_file *filp,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) || defined(RHEL_73)
		const struct drm_mode_fb_cmd2 *mode_cmd)
#else
		struct drm_mode_fb_cmd2 *mode_cmd)
#endif
{
	struct drm_gem_object *obj;
	struct vbox_framebuffer *vbox_fb;
	int ret = -ENOMEM;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || defined(RHEL_74)
	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
#else
	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
#endif
	if (!obj)
		return ERR_PTR(-ENOENT);

	vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
	if (!vbox_fb)
		goto err_unref_obj;

	ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
	if (ret)
		goto err_free_vbox_fb;

	return &vbox_fb->base;

err_free_vbox_fb:
	kfree(vbox_fb);
err_unref_obj:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs vbox_mode_funcs = {
	.fb_create = vbox_user_framebuffer_create,
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) && !defined(RHEL_73)
#define pci_iomap_range(dev, bar, offset, maxlen) \
	ioremap(pci_resource_start(dev, bar) + (offset), maxlen)
#endif

/**
 * Tell the host about the views. This design originally targeted the
 * Windows XP driver architecture and assumed that each screen would
 * have a dedicated frame buffer with the command buffer following it,
 * the whole being a "view". The host works out which screen a command
 * buffer belongs to by checking whether it is in the first view, then
 * whether it is in the second and so on. The first match wins. We
 * cheat around this by making the first view be the managed memory
 * plus the first command buffer, the second the same plus the second
 * buffer and so on.
 */
static int vbox_set_views(struct vbox_private *vbox)
{
	VBVAINFOVIEW *p;
	int i;

	p = VBoxHGSMIBufferAlloc(vbox->guest_pool, sizeof(*p),
				 HGSMI_CH_VBVA, VBVA_INFO_VIEW);
	if (!p)
		return -ENOMEM;

	for (i = 0; i < vbox->num_crtcs; ++i) {
		p->u32ViewIndex = i;
		p->u32ViewOffset = 0;
		p->u32ViewSize = vbox->available_vram_size +
				 i * VBVA_MIN_BUFFER_SIZE;
		p->u32MaxScreenSize = vbox->available_vram_size;

		VBoxHGSMIBufferSubmit(vbox->guest_pool, p);
	}

	VBoxHGSMIBufferFree(vbox->guest_pool, p);

	return 0;
}

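/**
 * Set up VBVA acceleration: reserve one command buffer per screen at the
 * end of usable VRAM, map them, enable VBVA and send the one-off view
 * information to the host.
 */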
static int vbox_accel_init(struct vbox_private *vbox)
{
	unsigned int i;
	int ret;

	vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
				       sizeof(*vbox->vbva_info), GFP_KERNEL);
	if (!vbox->vbva_info)
		return -ENOMEM;

	/* Take a command buffer for each screen from the end of usable VRAM. */
	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

	vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
					     vbox->available_vram_size,
					     vbox->num_crtcs *
					     VBVA_MIN_BUFFER_SIZE);
	if (!vbox->vbva_buffers)
		return -ENOMEM;

	for (i = 0; i < vbox->num_crtcs; ++i)
		VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
					   vbox->available_vram_size +
					   i * VBVA_MIN_BUFFER_SIZE,
					   VBVA_MIN_BUFFER_SIZE);

	vbox_enable_accel(vbox);
	ret = vbox_set_views(vbox);
	if (ret)
		goto err_pci_iounmap;

	return 0;

err_pci_iounmap:
	pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
	return ret;
}

static void vbox_accel_fini(struct vbox_private *vbox)
{
	vbox_disable_accel(vbox);
	pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
}

/** Do we support the 4.3 plus mode hint reporting interface? */
static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
	u32 have_hints, have_cursor;
	int ret;

	ret = VBoxQueryConfHGSMI(vbox->guest_pool,
				 VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
				 &have_hints);
	if (ret)
		return false;

	ret = VBoxQueryConfHGSMI(vbox->guest_pool,
				 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
				 &have_cursor);
	if (ret)
		return false;

	return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}

/**
 * Our refresh timer call-back. Only used for guests without dirty rectangle
 * support.
 */
static void vbox_refresh_timer(struct work_struct *work)
{
	struct vbox_private *vbox = container_of(work, struct vbox_private,
						 refresh_work.work);
	bool have_unblanked = false;
	struct drm_crtc *crtci;

	if (!vbox->need_refresh_timer)
		return;
	list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
		struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtci);

		if (crtci->enabled && !vbox_crtc->blanked)
			have_unblanked = true;
	}
	if (!have_unblanked)
		return;
	/* This forces a full refresh. */
	vbox_enable_accel(vbox);
	/* Schedule the next timer iteration. */
	schedule_delayed_work(&vbox->refresh_work, VBOX_REFRESH_PERIOD);
}

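/**
 * Check whether the host supports a given DISPI interface by writing its ID
 * to the index register and testing whether it reads back unchanged.
 */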
static bool vbox_check_supported(u16 id)
{
	u16 dispi_id;

	vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
	dispi_id = inw(VBE_DISPI_IOPORT_DATA);

	return dispi_id == id;
}

/**
 * Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 */
static int vbox_hw_init(struct vbox_private *vbox)
{
	int ret = -ENOMEM;

	vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
	vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);

	DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

	/* Map guest-heap at end of vram */
	vbox->guest_heap =
		pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_SIZE);
	if (!vbox->guest_heap)
		return -ENOMEM;

	/* Create guest-heap mem-pool using 2^4 = 16 byte chunks */
	vbox->guest_pool = gen_pool_create(4, -1);
	if (!vbox->guest_pool)
		goto err_unmap_guest_heap;

	ret = gen_pool_add_virt(vbox->guest_pool,
				(unsigned long)vbox->guest_heap,
				GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_USABLE_SIZE, -1);
	if (ret)
		goto err_destroy_guest_pool;

	/* Reduce available VRAM size to reflect the guest heap. */
	vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
	/* Linux drm represents monitors as a 32-bit array. */
	VBoxQueryConfHGSMI(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
			   &vbox->num_crtcs);
	vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);

	if (!have_hgsmi_mode_hints(vbox)) {
		ret = -ENOTSUPP;
		goto err_destroy_guest_pool;
	}

	vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
					     sizeof(VBVAMODEHINT),
					     GFP_KERNEL);
	if (!vbox->last_mode_hints) {
		ret = -ENOMEM;
		goto err_destroy_guest_pool;
	}

	ret = vbox_accel_init(vbox);
	if (ret)
		goto err_destroy_guest_pool;

	/* Set up the refresh timer for users which do not send dirty rectangles. */
	INIT_DELAYED_WORK(&vbox->refresh_work, vbox_refresh_timer);

	return 0;

err_destroy_guest_pool:
	gen_pool_destroy(vbox->guest_pool);
err_unmap_guest_heap:
	pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
	return ret;
}

static void vbox_hw_fini(struct vbox_private *vbox)
{
	vbox->need_refresh_timer = false;
	cancel_delayed_work(&vbox->refresh_work);
	vbox_accel_fini(vbox);
	gen_pool_destroy(vbox->guest_pool);
	pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
}

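/**
 * Main driver initialisation: check for an HGSMI-capable host, then bring
 * up the hardware, memory manager, mode setting, interrupt handling and
 * fbdev emulation in that order, unwinding on failure.
 */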
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct vbox_private *vbox;
	int ret = 0;

	if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
		return -ENODEV;

	vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
	if (!vbox)
		return -ENOMEM;

	dev->dev_private = vbox;
	vbox->dev = dev;

	mutex_init(&vbox->hw_mutex);

	ret = vbox_hw_init(vbox);
	if (ret)
		return ret;

	ret = vbox_mm_init(vbox);
	if (ret)
		goto err_hw_fini;

	drm_mode_config_init(dev);

	dev->mode_config.funcs = (void *)&vbox_mode_funcs;
	dev->mode_config.min_width = 64;
	dev->mode_config.min_height = 64;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
	dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

	ret = vbox_mode_init(dev);
	if (ret)
		goto err_drm_mode_cleanup;

	ret = vbox_irq_init(vbox);
	if (ret)
		goto err_mode_fini;

	ret = vbox_fbdev_init(dev);
	if (ret)
		goto err_irq_fini;

	return 0;

err_irq_fini:
	vbox_irq_fini(vbox);
err_mode_fini:
	vbox_mode_fini(dev);
err_drm_mode_cleanup:
	drm_mode_config_cleanup(dev);
	vbox_mm_fini(vbox);
err_hw_fini:
	vbox_hw_fini(vbox);
	return ret;
}

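/** Tear down everything set up by vbox_driver_load(), in reverse order. */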
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || defined(RHEL_75)
void vbox_driver_unload(struct drm_device *dev)
#else
int vbox_driver_unload(struct drm_device *dev)
#endif
{
	struct vbox_private *vbox = dev->dev_private;

	vbox_fbdev_fini(dev);
	vbox_irq_fini(vbox);
	vbox_mode_fini(dev);
	drm_mode_config_cleanup(dev);
	vbox_mm_fini(vbox);
	vbox_hw_fini(vbox);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) && !defined(RHEL_75)
	return 0;
#endif
}

/**
 * @note this is described in the DRM framework documentation. AST does not
 * have it, but we get an oops on driver unload if it is not present.
 */
void vbox_driver_lastclose(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) || defined(RHEL_71)
	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
	drm_modeset_lock_all(dev);
	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
	drm_modeset_unlock_all(dev);
#endif
}

int vbox_gem_create(struct drm_device *dev,
		    u32 size, bool iskernel, struct drm_gem_object **obj)
{
	struct vbox_bo *vboxbo;
	int ret;

	*obj = NULL;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("failed to allocate GEM object\n");
		return ret;
	}

	*obj = &vboxbo->gem;

	return 0;
}

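/**
 * Create a dumb buffer: derive pitch and size from the requested geometry,
 * allocate a GEM object of that size and return a handle to it.
 */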
int vbox_dumb_create(struct drm_file *file,
		     struct drm_device *dev, struct drm_mode_create_dumb *args)
{
	int ret;
	struct drm_gem_object *gobj;
	u32 handle;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = vbox_gem_create(dev, args->size, false, &gobj);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file, gobj, &handle);
	drm_gem_object_unreference_unlocked(gobj);
	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && !defined(RHEL_73)
int vbox_dumb_destroy(struct drm_file *file,
		      struct drm_device *dev, u32 handle)
{
	return drm_gem_handle_delete(file, handle);
}
#endif

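/**
 * Drop a reference to the TTM buffer object underlying a vbox_bo and clear
 * the caller's pointer once the reference is gone.
 */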
static void vbox_bo_unref(struct vbox_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	if (!tbo)
		*bo = NULL;
}

void vbox_gem_free_object(struct drm_gem_object *obj)
{
	struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

	vbox_bo_unref(&vbox_bo);
}

static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && !defined(RHEL_70)
	return bo->bo.addr_space_offset;
#else
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif
}

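/**
 * Look up the GEM object for a handle and return the fake offset which user
 * space should pass to mmap() on the DRM device node to map the buffer.
 */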
int
vbox_dumb_mmap_offset(struct drm_file *file,
		      struct drm_device *dev,
		      u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;
	struct vbox_bo *bo;

	mutex_lock(&dev->struct_mutex);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || defined(RHEL_74)
	obj = drm_gem_object_lookup(file, handle);
#else
	obj = drm_gem_object_lookup(dev, file, handle);
#endif
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	bo = gem_to_vbox_bo(obj);
	*offset = vbox_bo_mmap_offset(bo);

	drm_gem_object_unreference(obj);
	ret = 0;

out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}