VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_main.c

/* $Id: vbox_main.c 106061 2024-09-16 14:03:52Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2024 Oracle and/or its affiliates.
 * This file is based on ast_main.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>,
 *          Michael Thayer <michael.thayer@oracle.com>,
 *          Hans de Goede <hdegoede@redhat.com>
 */
#include "vbox_drv.h"
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#if RTLNX_VER_MIN(6,3,0) || RTLNX_RHEL_RANGE(8,9, 8,99) || RTLNX_RHEL_RANGE(9,3, 9,99)
# include <drm/drm_modeset_helper.h>
#endif

#include <VBoxVideoGuest.h>
#include <VBoxVideoVBE.h>

#include "hgsmi_channels.h"

static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
        struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);

        if (vbox_fb->obj)
#if RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
                drm_gem_object_put(vbox_fb->obj);
#else
                drm_gem_object_put_unlocked(vbox_fb->obj);
#endif

        drm_framebuffer_cleanup(fb);
        kfree(fb);
}

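/**
 * Enable VBVA command submission for every CRTC which does not yet have it
 * enabled.  Each CRTC uses its own ring buffer inside the mapping set up by
 * vbox_accel_init(), at offset i * VBVA_MIN_BUFFER_SIZE.
 */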
void vbox_enable_accel(struct vbox_private *vbox)
{
        unsigned int i;
        struct VBVABUFFER *vbva;

        if (!vbox->vbva_info || !vbox->vbva_buffers) {
                /* Should never happen... */
                DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
                return;
        }

        for (i = 0; i < vbox->num_crtcs; ++i) {
                if (vbox->vbva_info[i].pVBVA)
                        continue;

                vbva = (void __force *)vbox->vbva_buffers +
                        i * VBVA_MIN_BUFFER_SIZE;
                if (!VBoxVBVAEnable(&vbox->vbva_info[i],
                                    vbox->guest_pool, vbva, i)) {
                        /* very old host or driver error. */
                        DRM_ERROR("vboxvideo: vbva_enable failed\n");
                        return;
                }
        }
}

void vbox_disable_accel(struct vbox_private *vbox)
{
        unsigned int i;

        for (i = 0; i < vbox->num_crtcs; ++i)
                VBoxVBVADisable(&vbox->vbva_info[i], vbox->guest_pool, i);
}

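/**
 * Report the guest's VBVA capabilities to the host.  Video mode hints are
 * only advertised once the initial mode has been queried.
 */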
void vbox_report_caps(struct vbox_private *vbox)
{
        u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
                   VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;

        if (vbox->initial_mode_queried)
                caps |= VBVACAPS_VIDEO_MODE_HINTS;

        VBoxHGSMISendCapsInfo(vbox->guest_pool, caps);
}

/**
 * Send information about dirty rectangles to VBVA. If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?)
 */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
                                       struct drm_clip_rect *rects,
                                       unsigned int num_rects)
{
        struct vbox_private *vbox = fb->dev->dev_private;
        struct drm_crtc *crtc;
        unsigned int i;

        /* The user can send rectangles, we do not need the timer. */
        vbox->need_refresh_timer = false;
        mutex_lock(&vbox->hw_mutex);
        list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
                if (CRTC_FB(crtc) != fb)
                        continue;

                for (i = 0; i < num_rects; ++i) {
                        VBVACMDHDR cmd_hdr;
                        unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;

                        if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
                            (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
                            (rects[i].x2 < crtc->x) ||
                            (rects[i].y2 < crtc->y))
                                continue;

                        cmd_hdr.x = (s16)rects[i].x1;
                        cmd_hdr.y = (s16)rects[i].y1;
                        cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
                        cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;

                        if (!VBoxVBVABufferBeginUpdate(&vbox->vbva_info[crtc_id],
                                                       vbox->guest_pool))
                                continue;

                        VBoxVBVAWrite(&vbox->vbva_info[crtc_id], vbox->guest_pool,
                                      &cmd_hdr, sizeof(cmd_hdr));
                        VBoxVBVABufferEndUpdate(&vbox->vbva_info[crtc_id]);
                }
        }
        mutex_unlock(&vbox->hw_mutex);
}

static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                       struct drm_file *file_priv,
                                       unsigned int flags, unsigned int color,
                                       struct drm_clip_rect *rects,
                                       unsigned int num_rects)
{
        vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

        return 0;
}

static const struct drm_framebuffer_funcs vbox_fb_funcs = {
        .destroy = vbox_user_framebuffer_destroy,
        .dirty = vbox_user_framebuffer_dirty,
};

int vbox_framebuffer_init(struct drm_device *dev,
                          struct vbox_framebuffer *vbox_fb,
#if RTLNX_VER_MIN(4,5,0) || RTLNX_RHEL_MAJ_PREREQ(7,3)
                          const struct DRM_MODE_FB_CMD *mode_cmd,
#else
                          struct DRM_MODE_FB_CMD *mode_cmd,
#endif
                          struct drm_gem_object *obj)
{
        int ret;

#if RTLNX_VER_MIN(4,11,0) || RTLNX_RHEL_MAJ_PREREQ(7,5)
        drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
#else
        drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
#endif
        vbox_fb->obj = obj;
        ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
                return ret;
        }

        return 0;
}

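/**
 * The fb_create hook of struct drm_mode_config_funcs: wrap the GEM object
 * named by the first handle in a new vbox_framebuffer for user space.
 */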
static struct drm_framebuffer *vbox_user_framebuffer_create(
                struct drm_device *dev,
                struct drm_file *filp,
#if RTLNX_VER_MIN(4,5,0) || RTLNX_RHEL_MAJ_PREREQ(7,3)
                const struct drm_mode_fb_cmd2 *mode_cmd)
#else
                struct drm_mode_fb_cmd2 *mode_cmd)
#endif
{
        struct drm_gem_object *obj;
        struct vbox_framebuffer *vbox_fb;
        int ret = -ENOMEM;

#if RTLNX_VER_MIN(4,7,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)
        obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
#else
        obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
#endif
        if (!obj)
                return ERR_PTR(-ENOENT);

        vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
        if (!vbox_fb)
                goto err_unref_obj;

        ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
        if (ret)
                goto err_free_vbox_fb;

        return &vbox_fb->base;

err_free_vbox_fb:
        kfree(vbox_fb);
err_unref_obj:
#if RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
        drm_gem_object_put(obj);
#else
        drm_gem_object_put_unlocked(obj);
#endif
        return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs vbox_mode_funcs = {
        .fb_create = vbox_user_framebuffer_create,
};

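/*
 * On the old kernels matched by this check pci_iomap_range() is not
 * available, so fall back to a plain ioremap() of the relevant part of BAR 0.
 */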
#if RTLNX_VER_MAX(4,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,3)
# define pci_iomap_range(dev, bar, offset, maxlen) \
        ioremap(pci_resource_start(dev, bar) + (offset), maxlen)
#endif

/**
 * Tell the host about the views. This design originally targeted the
 * Windows XP driver architecture and assumed that each screen would
 * have a dedicated frame buffer with the command buffer following it,
 * the whole being a "view". The host works out which screen a command
 * buffer belongs to by checking whether it is in the first view, then
 * whether it is in the second and so on. The first match wins. We
 * cheat around this by making the first view be the managed memory
 * plus the first command buffer, the second the same plus the second
 * buffer and so on.
 */
static int vbox_set_views(struct vbox_private *vbox)
{
        VBVAINFOVIEW *p;
        int i;

        p = VBoxHGSMIBufferAlloc(vbox->guest_pool, sizeof(*p),
                                 HGSMI_CH_VBVA, VBVA_INFO_VIEW);
        if (!p)
                return -ENOMEM;

        for (i = 0; i < vbox->num_crtcs; ++i) {
                p->u32ViewIndex = i;
                p->u32ViewOffset = 0;
                p->u32ViewSize = vbox->available_vram_size +
                                 i * VBVA_MIN_BUFFER_SIZE;
                p->u32MaxScreenSize = vbox->available_vram_size;

                VBoxHGSMIBufferSubmit(vbox->guest_pool, p);
        }

        VBoxHGSMIBufferFree(vbox->guest_pool, p);

        return 0;
}

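/**
 * Set up VBVA acceleration: reserve one command buffer per CRTC at the end
 * of usable VRAM, map that region, attach a buffer context to each CRTC and
 * tell the host about the resulting views.
 */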
static int vbox_accel_init(struct vbox_private *vbox)
{
        unsigned int i;
        int ret;

        vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
                                       sizeof(*vbox->vbva_info), GFP_KERNEL);
        if (!vbox->vbva_info)
                return -ENOMEM;

        /* Take a command buffer for each screen from the end of usable VRAM. */
        vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

        vbox->vbva_buffers = pci_iomap_range(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0,
                                             vbox->available_vram_size,
                                             vbox->num_crtcs *
                                             VBVA_MIN_BUFFER_SIZE);
        if (!vbox->vbva_buffers)
                return -ENOMEM;

        for (i = 0; i < vbox->num_crtcs; ++i)
                VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
                                           vbox->available_vram_size +
                                           i * VBVA_MIN_BUFFER_SIZE,
                                           VBVA_MIN_BUFFER_SIZE);

        vbox_enable_accel(vbox);
        ret = vbox_set_views(vbox);
        if (ret)
                goto err_pci_iounmap;

        return 0;

err_pci_iounmap:
        pci_iounmap(VBOX_DRM_TO_PCI_DEV(vbox->dev), vbox->vbva_buffers);
        return ret;
}

static void vbox_accel_fini(struct vbox_private *vbox)
{
        vbox_disable_accel(vbox);
        pci_iounmap(VBOX_DRM_TO_PCI_DEV(vbox->dev), vbox->vbva_buffers);
}

/** Do we support the 4.3 plus mode hint reporting interface? */
static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
        u32 have_hints, have_cursor;
        int ret;

        ret = VBoxQueryConfHGSMI(vbox->guest_pool,
                                 VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
                                 &have_hints);
        if (ret)
                return false;

        ret = VBoxQueryConfHGSMI(vbox->guest_pool,
                                 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
                                 &have_cursor);
        if (ret)
                return false;

        return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}

/**
 * Our refresh timer call-back. Only used for guests without dirty rectangle
 * support.
 */
static void vbox_refresh_timer(struct work_struct *work)
{
        struct vbox_private *vbox = container_of(work, struct vbox_private,
                                                 refresh_work.work);
        bool have_unblanked = false;
        struct drm_crtc *crtci;

        if (!vbox->need_refresh_timer)
                return;
        list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
                struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtci);

                if (crtci->enabled && !vbox_crtc->blanked)
                        have_unblanked = true;
        }
        if (!have_unblanked)
                return;
        /* This forces a full refresh. */
        vbox_enable_accel(vbox);
        /* Schedule the next timer iteration. */
        schedule_delayed_work(&vbox->refresh_work, VBOX_REFRESH_PERIOD);
}

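/**
 * Probe for a VBE DISPI feature: write the requested ID to the index port
 * and check whether the device reports the same ID back.
 */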
static bool vbox_check_supported(u16 id)
{
        u16 dispi_id;

        vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
        dispi_id = inw(VBE_DISPI_IOPORT_DATA);

        return dispi_id == id;
}

/**
 * Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 */
static int vbox_hw_init(struct vbox_private *vbox)
{
        int ret = -ENOMEM;

        vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
        vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);

        DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

        /* Map the guest heap at the end of VRAM. */
        vbox->guest_heap =
                pci_iomap_range(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0, GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_SIZE);
        if (!vbox->guest_heap)
                return -ENOMEM;

        /* Create the guest-heap memory pool using 2^4 = 16 byte chunks. */
        vbox->guest_pool = gen_pool_create(4, -1);
        if (!vbox->guest_pool)
                goto err_unmap_guest_heap;

        ret = gen_pool_add_virt(vbox->guest_pool,
                                (unsigned long)vbox->guest_heap,
                                GUEST_HEAP_OFFSET(vbox),
                                GUEST_HEAP_USABLE_SIZE, -1);
        if (ret)
                goto err_destroy_guest_pool;

        /* Reduce available VRAM size to reflect the guest heap. */
        vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
        /* Linux drm represents monitors as a 32-bit array. */
        VBoxQueryConfHGSMI(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
                           &vbox->num_crtcs);
        vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);

        if (!have_hgsmi_mode_hints(vbox)) {
                ret = -ENOTSUPP;
                goto err_destroy_guest_pool;
        }

        vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
                                             sizeof(VBVAMODEHINT),
                                             GFP_KERNEL);
        if (!vbox->last_mode_hints) {
                ret = -ENOMEM;
                goto err_destroy_guest_pool;
        }

        ret = vbox_accel_init(vbox);
        if (ret)
                goto err_destroy_guest_pool;

        /* Set up the refresh timer for users which do not send dirty rectangles. */
        INIT_DELAYED_WORK(&vbox->refresh_work, vbox_refresh_timer);

        return 0;

err_destroy_guest_pool:
        gen_pool_destroy(vbox->guest_pool);
err_unmap_guest_heap:
        pci_iounmap(VBOX_DRM_TO_PCI_DEV(vbox->dev), vbox->guest_heap);
        return ret;
}

static void vbox_hw_fini(struct vbox_private *vbox)
{
        vbox->need_refresh_timer = false;
        cancel_delayed_work(&vbox->refresh_work);
        vbox_accel_fini(vbox);
        gen_pool_destroy(vbox->guest_pool);
        pci_iounmap(VBOX_DRM_TO_PCI_DEV(vbox->dev), vbox->guest_heap);
}

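/**
 * Driver load entry point: check that the host supports HGSMI, then bring up
 * the hardware heaps, the memory manager, mode setting, interrupt handling
 * and fbdev emulation, unwinding in reverse order on failure.
 */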
#if RTLNX_VER_MIN(4,19,0) || RTLNX_RHEL_MIN(8,3)
int vbox_driver_load(struct drm_device *dev)
#else
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
#endif
{
        struct vbox_private *vbox;
        int ret = 0;

        if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
                return -ENODEV;

        vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
        if (!vbox)
                return -ENOMEM;

        dev->dev_private = vbox;
        vbox->dev = dev;

        mutex_init(&vbox->hw_mutex);

        ret = vbox_hw_init(vbox);
        if (ret)
                return ret;

        ret = vbox_mm_init(vbox);
        if (ret)
                goto err_hw_fini;

        drm_mode_config_init(dev);

        dev->mode_config.funcs = (void *)&vbox_mode_funcs;
        dev->mode_config.min_width = 64;
        dev->mode_config.min_height = 64;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
        dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

        ret = vbox_mode_init(dev);
        if (ret)
                goto err_drm_mode_cleanup;

        ret = vbox_irq_init(vbox);
        if (ret)
                goto err_mode_fini;

        ret = vbox_fbdev_init(dev);
        if (ret)
                goto err_irq_fini;

        return 0;

err_irq_fini:
        vbox_irq_fini(vbox);
err_mode_fini:
        vbox_mode_fini(dev);
err_drm_mode_cleanup:
        drm_mode_config_cleanup(dev);
        vbox_mm_fini(vbox);
err_hw_fini:
        vbox_hw_fini(vbox);
        return ret;
}

#if RTLNX_VER_MIN(4,11,0) || RTLNX_RHEL_MAJ_PREREQ(7,5)
void vbox_driver_unload(struct drm_device *dev)
#else
int vbox_driver_unload(struct drm_device *dev)
#endif
{
        struct vbox_private *vbox = dev->dev_private;

        vbox_fbdev_fini(dev);
        vbox_irq_fini(vbox);
        vbox_mode_fini(dev);
        drm_mode_config_cleanup(dev);
        vbox_mm_fini(vbox);
        vbox_hw_fini(vbox);
#if RTLNX_VER_MAX(4,11,0) && !RTLNX_RHEL_MAJ_PREREQ(7,5)
        return 0;
#endif
}

/**
 * @note this is described in the DRM framework documentation. AST does not
 * have it, but we get an oops on driver unload if it is not present.
 */
void vbox_driver_lastclose(struct drm_device *dev)
{
        struct vbox_private *vbox = dev->dev_private;

#if RTLNX_VER_MIN(3,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,1)
        if (vbox->fbdev)
                drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
        drm_modeset_lock_all(dev);
        if (vbox->fbdev)
                drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
        drm_modeset_unlock_all(dev);
#endif
}

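/**
 * Allocate a GEM object backed by a vbox_bo.  The requested size is rounded
 * up to whole pages before the buffer object is created.
 */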
int vbox_gem_create(struct drm_device *dev,
                    u32 size, bool iskernel, struct drm_gem_object **obj)
{
        struct vbox_bo *vboxbo;
        int ret;

        *obj = NULL;

        size = roundup(size, PAGE_SIZE);
        if (size == 0) {
                DRM_ERROR("bad size\n");
                return -EINVAL;
        }

        ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("failed to allocate GEM object (%d)\n", ret);
                return ret;
        }

        *obj = &vboxbo->gem;

        return 0;
}

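/**
 * Dumb-buffer creation: derive the pitch from the width and bits per pixel
 * (rounded up to whole bytes), allocate a GEM object of pitch * height bytes
 * and return a handle to it.
 */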
int vbox_dumb_create(struct drm_file *file,
                     struct drm_device *dev, struct drm_mode_create_dumb *args)
{
        int ret;
        struct drm_gem_object *gobj;
        u32 handle;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = vbox_gem_create(dev, args->size, false, &gobj);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file, gobj, &handle);
#if RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
        drm_gem_object_put(gobj);
#else
        drm_gem_object_put_unlocked(gobj);
#endif
        if (ret)
                return ret;

        args->handle = handle;

        return 0;
}

#if RTLNX_VER_MAX(3,12,0) && !RTLNX_RHEL_MAJ_PREREQ(7,3)
int vbox_dumb_destroy(struct drm_file *file,
                      struct drm_device *dev, u32 handle)
{
        return drm_gem_handle_delete(file, handle);
}
#endif

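/*
 * Compatibility shim: the older kernels matched by this check do not provide
 * ttm_bo_put(), so implement it in terms of ttm_bo_unref().
 */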
#if RTLNX_VER_MAX(4,19,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static void ttm_bo_put(struct ttm_buffer_object *bo)
{
        ttm_bo_unref(&bo);
}
#endif

void vbox_gem_free_object(struct drm_gem_object *obj)
{
        struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        /* Starting with kernel 5.14 a warning appears in dmesg on attempts to
         * destroy a pinned buffer object.  Make sure it is unpinned. */
        while (vbox_bo->bo.pin_count) {
                int ret;

                ret = vbox_bo_unpin(vbox_bo);
                if (ret) {
                        DRM_ERROR("unable to unpin buffer object\n");
                        break;
                }
        }
#endif

        ttm_bo_put(&vbox_bo->bo);
}

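/**
 * Return the fake mmap offset of a buffer object.  The field holding the VMA
 * offset node has moved between kernel versions, hence the three variants.
 */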
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(5,4,0) || RTLNX_RHEL_MIN(8,3) || RTLNX_SUSE_MAJ_PREREQ(15,3)
        return drm_vma_node_offset_addr(&bo->bo.base.vma_node);
#elif RTLNX_VER_MAX(3,12,0) && !RTLNX_RHEL_MAJ_PREREQ(7,0)
        return bo->bo.addr_space_offset;
#else
        return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif /* >= 5.4.0 */
}

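/**
 * Look up the GEM object behind a dumb-buffer handle and report the offset
 * at which user space can mmap() it.  On newer kernels the caller must also
 * be granted access to the VMA offset node explicitly.
 */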
int vbox_dumb_mmap_offset(struct drm_file *file,
                          struct drm_device *dev,
                          u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;
        struct vbox_bo *bo;

        mutex_lock(&dev->struct_mutex);
#if RTLNX_VER_MIN(4,7,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)
        obj = drm_gem_object_lookup(file, handle);
#else
        obj = drm_gem_object_lookup(dev, file, handle);
#endif
        if (!obj) {
                ret = -ENOENT;
                goto out_unlock;
        }

        bo = gem_to_vbox_bo(obj);
        *offset = vbox_bo_mmap_offset(bo);

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        ret = drm_vma_node_allow(&bo->bo.base.vma_node, file);
        if (ret)
                DRM_ERROR("unable to grant privileges to user\n");
#endif

        drm_gem_object_put(obj);

out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}