VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_main.c@96407

/* $Id: vbox_main.c 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2022 Oracle and/or its affiliates.
 * This file is based on ast_main.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>,
 *          Michael Thayer <michael.thayer@oracle.com>,
 *          Hans de Goede <hdegoede@redhat.com>
 */
#include "vbox_drv.h"
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include <VBoxVideoGuest.h>
#include <VBoxVideoVBE.h>

#include "hgsmi_channels.h"

static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);

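	/*
	 * drm_gem_object_put_unlocked() was renamed to drm_gem_object_put()
	 * in kernel 5.9 (the rename was also backported to RHEL 8.4 and
	 * SLES 15 SP3), hence the version ladder below.
	 */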
	if (vbox_fb->obj)
#if RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
		drm_gem_object_put(vbox_fb->obj);
#else
		drm_gem_object_put_unlocked(vbox_fb->obj);
#endif

	drm_framebuffer_cleanup(fb);
	kfree(fb);
}

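/*
 * Enable VBVA (video acceleration) on every CRTC that does not have it
 * enabled yet.  Each CRTC gets its own VBVABUFFER, carved out of the
 * shared buffer area that vbox_accel_init() mapped at the end of VRAM.
 */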
void vbox_enable_accel(struct vbox_private *vbox)
{
	unsigned int i;
	struct VBVABUFFER *vbva;

	if (!vbox->vbva_info || !vbox->vbva_buffers) {
		/* Should never happen... */
		DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
		return;
	}

	for (i = 0; i < vbox->num_crtcs; ++i) {
		if (vbox->vbva_info[i].pVBVA)
			continue;

		vbva = (void __force *)vbox->vbva_buffers +
			i * VBVA_MIN_BUFFER_SIZE;
		if (!VBoxVBVAEnable(&vbox->vbva_info[i],
				    vbox->guest_pool, vbva, i)) {
			/* very old host or driver error. */
			DRM_ERROR("vboxvideo: vbva_enable failed\n");
			return;
		}
	}
}

void vbox_disable_accel(struct vbox_private *vbox)
{
	unsigned int i;

	for (i = 0; i < vbox->num_crtcs; ++i)
		VBoxVBVADisable(&vbox->vbva_info[i], vbox->guest_pool, i);
}

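/*
 * Report the driver's capabilities to the host.  Note that video mode
 * hint support is only advertised once the initial mode has been queried.
 */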
void vbox_report_caps(struct vbox_private *vbox)
{
	u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
		   VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;

	if (vbox->initial_mode_queried)
		caps |= VBVACAPS_VIDEO_MODE_HINTS;

	VBoxHGSMISendCapsInfo(vbox->guest_pool, caps);
}

/**
 * Send information about dirty rectangles to VBVA. If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?)
 */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	struct vbox_private *vbox = fb->dev->dev_private;
	struct drm_crtc *crtc;
	unsigned int i;

	/* The user can send rectangles, we do not need the timer. */
	vbox->need_refresh_timer = false;
	mutex_lock(&vbox->hw_mutex);
	list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
		if (CRTC_FB(crtc) != fb)
			continue;

		for (i = 0; i < num_rects; ++i) {
			VBVACMDHDR cmd_hdr;
			unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;

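			/* Skip rectangles that do not overlap this CRTC at all. */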
			if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
			    (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
			    (rects[i].x2 < crtc->x) ||
			    (rects[i].y2 < crtc->y))
				continue;

			cmd_hdr.x = (s16)rects[i].x1;
			cmd_hdr.y = (s16)rects[i].y1;
			cmd_hdr.w = (u16)(rects[i].x2 - rects[i].x1);
			cmd_hdr.h = (u16)(rects[i].y2 - rects[i].y1);

			if (!VBoxVBVABufferBeginUpdate(&vbox->vbva_info[crtc_id],
						       vbox->guest_pool))
				continue;

			VBoxVBVAWrite(&vbox->vbva_info[crtc_id], vbox->guest_pool,
				      &cmd_hdr, sizeof(cmd_hdr));
			VBoxVBVABufferEndUpdate(&vbox->vbva_info[crtc_id]);
		}
	}
	mutex_unlock(&vbox->hw_mutex);
}

static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       struct drm_file *file_priv,
				       unsigned int flags, unsigned int color,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

	return 0;
}

static const struct drm_framebuffer_funcs vbox_fb_funcs = {
	.destroy = vbox_user_framebuffer_destroy,
	.dirty = vbox_user_framebuffer_dirty,
};

int vbox_framebuffer_init(struct drm_device *dev,
			  struct vbox_framebuffer *vbox_fb,
#if RTLNX_VER_MIN(4,5,0) || RTLNX_RHEL_MAJ_PREREQ(7,3)
			  const struct DRM_MODE_FB_CMD *mode_cmd,
#else
			  struct DRM_MODE_FB_CMD *mode_cmd,
#endif
			  struct drm_gem_object *obj)
{
	int ret;

#if RTLNX_VER_MIN(4,11,0) || RTLNX_RHEL_MAJ_PREREQ(7,5)
	drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
#else
	drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
#endif
	vbox_fb->obj = obj;
	ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}

static struct drm_framebuffer *vbox_user_framebuffer_create(
		struct drm_device *dev,
		struct drm_file *filp,
#if RTLNX_VER_MIN(4,5,0) || RTLNX_RHEL_MAJ_PREREQ(7,3)
		const struct drm_mode_fb_cmd2 *mode_cmd)
#else
		struct drm_mode_fb_cmd2 *mode_cmd)
#endif
{
	struct drm_gem_object *obj;
	struct vbox_framebuffer *vbox_fb;
	int ret = -ENOMEM;

#if RTLNX_VER_MIN(4,7,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)
	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
#else
	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
#endif
	if (!obj)
		return ERR_PTR(-ENOENT);

	vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
	if (!vbox_fb)
		goto err_unref_obj;

	ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
	if (ret)
		goto err_free_vbox_fb;

	return &vbox_fb->base;

err_free_vbox_fb:
	kfree(vbox_fb);
err_unref_obj:
#if RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
	drm_gem_object_put(obj);
#else
	drm_gem_object_put_unlocked(obj);
#endif
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs vbox_mode_funcs = {
	.fb_create = vbox_user_framebuffer_create,
};

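/*
 * pci_iomap_range() only appeared in kernel 4.0; on older kernels fall
 * back to a plain ioremap() of the relevant slice of the BAR.
 */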
#if RTLNX_VER_MAX(4,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,3)
# define pci_iomap_range(dev, bar, offset, maxlen) \
	ioremap(pci_resource_start(dev, bar) + (offset), maxlen)
#endif

/**
 * Tell the host about the views. This design originally targeted the
 * Windows XP driver architecture and assumed that each screen would
 * have a dedicated frame buffer with the command buffer following it,
 * the whole being a "view". The host works out which screen a command
 * buffer belongs to by checking whether it is in the first view, then
 * whether it is in the second and so on. The first match wins. We
 * cheat around this by making the first view be the managed memory
 * plus the first command buffer, the second the same plus the second
 * buffer and so on.
 */
static int vbox_set_views(struct vbox_private *vbox)
{
	VBVAINFOVIEW *p;
	int i;

	p = VBoxHGSMIBufferAlloc(vbox->guest_pool, sizeof(*p),
				 HGSMI_CH_VBVA, VBVA_INFO_VIEW);
	if (!p)
		return -ENOMEM;

	for (i = 0; i < vbox->num_crtcs; ++i) {
		p->u32ViewIndex = i;
		p->u32ViewOffset = 0;
		p->u32ViewSize = vbox->available_vram_size +
			i * VBVA_MIN_BUFFER_SIZE;
		p->u32MaxScreenSize = vbox->available_vram_size;

		VBoxHGSMIBufferSubmit(vbox->guest_pool, p);
	}

	VBoxHGSMIBufferFree(vbox->guest_pool, p);

	return 0;
}

static int vbox_accel_init(struct vbox_private *vbox)
{
	unsigned int i;
	int ret;

	vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
				       sizeof(*vbox->vbva_info), GFP_KERNEL);
	if (!vbox->vbva_info)
		return -ENOMEM;

	/* Take a command buffer for each screen from the end of usable VRAM. */
	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

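	/*
	 * Map the command-buffer area: it starts right after the (now
	 * reduced) framebuffer memory, i.e. at offset available_vram_size
	 * into BAR 0, and holds one VBVA_MIN_BUFFER_SIZE buffer per CRTC.
	 */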
	vbox->vbva_buffers = pci_iomap_range(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0,
					     vbox->available_vram_size,
					     vbox->num_crtcs *
					     VBVA_MIN_BUFFER_SIZE);
	if (!vbox->vbva_buffers)
		return -ENOMEM;

	for (i = 0; i < vbox->num_crtcs; ++i)
		VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
					   vbox->available_vram_size +
					   i * VBVA_MIN_BUFFER_SIZE,
					   VBVA_MIN_BUFFER_SIZE);

	vbox_enable_accel(vbox);
	ret = vbox_set_views(vbox);
	if (ret)
		goto err_pci_iounmap;

	return 0;

err_pci_iounmap:
	pci_iounmap(VBOX_DRM_TO_PCI_DEV(vbox->dev), vbox->vbva_buffers);
	return ret;
}

static void vbox_accel_fini(struct vbox_private *vbox)
{
	vbox_disable_accel(vbox);
	pci_iounmap(VBOX_DRM_TO_PCI_DEV(vbox->dev), vbox->vbva_buffers);
}

/** Do we support the 4.3 plus mode hint reporting interface? */
static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
	u32 have_hints, have_cursor;
	int ret;

	ret = VBoxQueryConfHGSMI(vbox->guest_pool,
				 VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
				 &have_hints);
	if (ret)
		return false;

	ret = VBoxQueryConfHGSMI(vbox->guest_pool,
				 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
				 &have_cursor);
	if (ret)
		return false;

	return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}

/**
 * Our refresh timer call-back. Only used for guests without dirty rectangle
 * support.
 */
static void vbox_refresh_timer(struct work_struct *work)
{
	struct vbox_private *vbox = container_of(work, struct vbox_private,
						 refresh_work.work);
	bool have_unblanked = false;
	struct drm_crtc *crtci;

	if (!vbox->need_refresh_timer)
		return;
	list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
		struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtci);

		if (crtci->enabled && !vbox_crtc->blanked)
			have_unblanked = true;
	}
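	/* If every CRTC is blanked there is nothing to refresh: let the
	 * timer lapse instead of rescheduling. */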
	if (!have_unblanked)
		return;
	/* This forces a full refresh. */
	vbox_enable_accel(vbox);
	/* Schedule the next timer iteration. */
	schedule_delayed_work(&vbox->refresh_work, VBOX_REFRESH_PERIOD);
}

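/*
 * Probe for a DISPI (Bochs/VirtualBox VBE extension) capability: write
 * the ID to the index register and check whether it reads back unchanged.
 */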
static bool vbox_check_supported(u16 id)
{
	u16 dispi_id;

	vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
	dispi_id = inw(VBE_DISPI_IOPORT_DATA);

	return dispi_id == id;
}

/**
 * Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 */
static int vbox_hw_init(struct vbox_private *vbox)
{
	int ret = -ENOMEM;

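	/*
	 * On VirtualBox hosts a 32-bit read of the DISPI data port reports
	 * the VRAM size (a host extension; 16-bit reads return the register).
	 */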
	vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
	vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);

	DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

	/* Map the guest heap at the end of VRAM. */
	vbox->guest_heap =
		pci_iomap_range(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0, GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_SIZE);
	if (!vbox->guest_heap)
		return -ENOMEM;

	/* Create the guest-heap memory pool, using 2^4 = 16 byte chunks. */
	vbox->guest_pool = gen_pool_create(4, -1);
	if (!vbox->guest_pool)
		goto err_unmap_guest_heap;

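	/*
	 * Register the mapped heap with the pool.  The "physical" address
	 * given here is the heap's offset within VRAM, so pool allocations
	 * translate directly to offsets the host understands.
	 */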
	ret = gen_pool_add_virt(vbox->guest_pool,
				(unsigned long)vbox->guest_heap,
				GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_USABLE_SIZE, -1);
	if (ret)
		goto err_destroy_guest_pool;

	/* Reduce available VRAM size to reflect the guest heap. */
	vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
	/* Linux drm represents monitors as a 32-bit array. */
	VBoxQueryConfHGSMI(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
			   &vbox->num_crtcs);
	vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);

	if (!have_hgsmi_mode_hints(vbox)) {
		ret = -ENOTSUPP;
		goto err_destroy_guest_pool;
	}

	vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
					     sizeof(VBVAMODEHINT),
					     GFP_KERNEL);
	if (!vbox->last_mode_hints) {
		ret = -ENOMEM;
		goto err_destroy_guest_pool;
	}

	ret = vbox_accel_init(vbox);
	if (ret)
		goto err_destroy_guest_pool;

	/* Set up the refresh timer for users which do not send dirty rectangles. */
	INIT_DELAYED_WORK(&vbox->refresh_work, vbox_refresh_timer);

	return 0;

err_destroy_guest_pool:
	gen_pool_destroy(vbox->guest_pool);
err_unmap_guest_heap:
	pci_iounmap(VBOX_DRM_TO_PCI_DEV(vbox->dev), vbox->guest_heap);
	return ret;
}

static void vbox_hw_fini(struct vbox_private *vbox)
{
	vbox->need_refresh_timer = false;
	cancel_delayed_work(&vbox->refresh_work);
	vbox_accel_fini(vbox);
	gen_pool_destroy(vbox->guest_pool);
	pci_iounmap(VBOX_DRM_TO_PCI_DEV(vbox->dev), vbox->guest_heap);
}

#if RTLNX_VER_MIN(4,19,0) || RTLNX_RHEL_MIN(8,3)
int vbox_driver_load(struct drm_device *dev)
#else
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
#endif
{
	struct vbox_private *vbox;
	int ret = 0;

	if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
		return -ENODEV;

	vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
	if (!vbox)
		return -ENOMEM;

	dev->dev_private = vbox;
	vbox->dev = dev;

	mutex_init(&vbox->hw_mutex);

	ret = vbox_hw_init(vbox);
	if (ret)
		return ret;

	ret = vbox_mm_init(vbox);
	if (ret)
		goto err_hw_fini;

	drm_mode_config_init(dev);

	dev->mode_config.funcs = (void *)&vbox_mode_funcs;
	dev->mode_config.min_width = 64;
	dev->mode_config.min_height = 64;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
	dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

	ret = vbox_mode_init(dev);
	if (ret)
		goto err_drm_mode_cleanup;

	ret = vbox_irq_init(vbox);
	if (ret)
		goto err_mode_fini;

	ret = vbox_fbdev_init(dev);
	if (ret)
		goto err_irq_fini;

	return 0;

err_irq_fini:
	vbox_irq_fini(vbox);
err_mode_fini:
	vbox_mode_fini(dev);
err_drm_mode_cleanup:
	drm_mode_config_cleanup(dev);
	vbox_mm_fini(vbox);
err_hw_fini:
	vbox_hw_fini(vbox);
	return ret;
}

#if RTLNX_VER_MIN(4,11,0) || RTLNX_RHEL_MAJ_PREREQ(7,5)
void vbox_driver_unload(struct drm_device *dev)
#else
int vbox_driver_unload(struct drm_device *dev)
#endif
{
	struct vbox_private *vbox = dev->dev_private;

	vbox_fbdev_fini(dev);
	vbox_irq_fini(vbox);
	vbox_mode_fini(dev);
	drm_mode_config_cleanup(dev);
	vbox_mm_fini(vbox);
	vbox_hw_fini(vbox);
#if RTLNX_VER_MAX(4,11,0) && !RTLNX_RHEL_MAJ_PREREQ(7,5)
	return 0;
#endif
}

/**
 * @note this is described in the DRM framework documentation. AST does not
 * have it, but we get an oops on driver unload if it is not present.
 */
void vbox_driver_lastclose(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

#if RTLNX_VER_MIN(3,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,1)
	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
	drm_modeset_lock_all(dev);
	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
	drm_modeset_unlock_all(dev);
#endif
}

int vbox_gem_create(struct drm_device *dev,
		    u32 size, bool iskernel, struct drm_gem_object **obj)
{
	struct vbox_bo *vboxbo;
	int ret;

	*obj = NULL;

	size = roundup(size, PAGE_SIZE);
	if (size == 0) {
		DRM_ERROR("bad size\n");
		return -EINVAL;
	}

	ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("failed to allocate GEM object (%d)\n", ret);
		return ret;
	}

	*obj = &vboxbo->gem;

	return 0;
}

int vbox_dumb_create(struct drm_file *file,
		     struct drm_device *dev, struct drm_mode_create_dumb *args)
{
	int ret;
	struct drm_gem_object *gobj;
	u32 handle;

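	/* Bytes per scanline: width times bytes per pixel, rounding bpp up
	 * to a whole byte. */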
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = vbox_gem_create(dev, args->size, false, &gobj);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file, gobj, &handle);
#if RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
	drm_gem_object_put(gobj);
#else
	drm_gem_object_put_unlocked(gobj);
#endif
	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}

#if RTLNX_VER_MAX(3,12,0) && !RTLNX_RHEL_MAJ_PREREQ(7,3)
int vbox_dumb_destroy(struct drm_file *file,
		      struct drm_device *dev, u32 handle)
{
	return drm_gem_handle_delete(file, handle);
}
#endif

#if RTLNX_VER_MAX(4,19,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static void ttm_bo_put(struct ttm_buffer_object *bo)
{
	ttm_bo_unref(&bo);
}
#endif

void vbox_gem_free_object(struct drm_gem_object *obj)
{
	struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
	/* Starting with kernel 5.14 a warning appears in dmesg when a pinned
	 * buffer object is destroyed.  Make sure it is unpinned first. */
	while (vbox_bo->bo.pin_count) {
		int ret;

		ret = vbox_bo_unpin(vbox_bo);
		if (ret) {
			DRM_ERROR("unable to unpin buffer object\n");
			break;
		}
	}
#endif

	ttm_bo_put(&vbox_bo->bo);
}

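/*
 * Return the fake mmap offset of the buffer object.  The vma_node moved
 * from struct ttm_buffer_object into its embedded GEM object around
 * kernel 5.4, hence the version ladder below.
 */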
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(5,4,0) || RTLNX_RHEL_MIN(8,3) || RTLNX_SUSE_MAJ_PREREQ(15,3)
	return drm_vma_node_offset_addr(&bo->bo.base.vma_node);
#elif RTLNX_VER_MAX(3,12,0) && !RTLNX_RHEL_MAJ_PREREQ(7,0)
	return bo->bo.addr_space_offset;
#else
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif /* >= 5.4.0 */
}

int
vbox_dumb_mmap_offset(struct drm_file *file,
		      struct drm_device *dev,
		      u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct vbox_bo *bo;

	mutex_lock(&dev->struct_mutex);
#if RTLNX_VER_MIN(4,7,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)
	obj = drm_gem_object_lookup(file, handle);
#else
	obj = drm_gem_object_lookup(dev, file, handle);
#endif
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	bo = gem_to_vbox_bo(obj);
	*offset = vbox_bo_mmap_offset(bo);

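	/*
	 * Kernels 5.14 and later require the file to be explicitly granted
	 * access to the offset node before it may mmap the buffer.
	 */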
#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
	ret = drm_vma_node_allow(&bo->bo.base.vma_node, file);
	if (ret)
		DRM_ERROR("unable to grant mmap privileges to user\n");
#endif

	drm_gem_object_put(obj);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}