VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_ttm.c@100765

Last change on this file since 100765 was 100677, checked in by vboxsync, 18 months ago

Additions: Linux: vboxvideo: Add initial support for OpenSUSE 15.5 kernel, bugref:10491.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.9 KB
/* $Id: vbox_ttm.c 100677 2023-07-21 13:27:08Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2023 Oracle and/or its affiliates.
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Michael Thayer <michael.thayer@oracle.com>
 */
#include "vbox_drv.h"

#if RTLNX_VER_MIN(6,3,0) || RTLNX_RHEL_MAJ_PREREQ(9,3)
# include <drm/ttm/ttm_tt.h>
#endif

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_MAJ_PREREQ(8,5)
# include <drm/drm_gem.h>
# include <drm/drm_gem_ttm_helper.h>
# include <drm/drm_gem_vram_helper.h>
#else
# include <drm/ttm/ttm_page_alloc.h>
#endif

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
# include <drm/ttm/ttm_range_manager.h>
#endif

#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
#define PLACEMENT_FLAGS(placement) (placement)
#else
#define PLACEMENT_FLAGS(placement) ((placement).flags)
#endif


#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static inline struct vbox_private *vbox_bdev(struct ttm_device *bd)
#else
static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
#endif
{
    return container_of(bd, struct vbox_private, ttm.bdev);
}

#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
    return ttm_mem_global_init(ref->object);
}

static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
    ttm_mem_global_release(ref->object);
}

/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
    struct drm_global_reference *global_ref;
    int ret;

#if RTLNX_VER_MAX(5,0,0)
    global_ref = &vbox->ttm.mem_global_ref;
    global_ref->global_type = DRM_GLOBAL_TTM_MEM;
    global_ref->size = sizeof(struct ttm_mem_global);
    global_ref->init = &vbox_ttm_mem_global_init;
    global_ref->release = &vbox_ttm_mem_global_release;
    ret = drm_global_item_ref(global_ref);
    if (ret) {
        DRM_ERROR("Failed setting up TTM memory subsystem.\n");
        return ret;
    }

    vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
#endif
    global_ref = &vbox->ttm.bo_global_ref.ref;
    global_ref->global_type = DRM_GLOBAL_TTM_BO;
    global_ref->size = sizeof(struct ttm_bo_global);
    global_ref->init = &ttm_bo_global_init;
    global_ref->release = &ttm_bo_global_release;

    ret = drm_global_item_ref(global_ref);
    if (ret) {
        DRM_ERROR("Failed setting up TTM BO subsystem.\n");
#if RTLNX_VER_MAX(5,0,0)
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
#endif
        return ret;
    }

    return 0;
}

/**
 * Removes the vbox memory manager object from the global memory manager.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
    drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
    drm_global_item_unref(&vbox->ttm.mem_global_ref);
}
#endif

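/**
 * TTM buffer object destroy callback: releases the embedded GEM object and
 * frees the containing vbox_bo wrapper.
 */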
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
    struct vbox_bo *bo;

    bo = container_of(tbo, struct vbox_bo, bo);

    drm_gem_object_release(&bo->gem);
    kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
    if (bo->destroy == &vbox_bo_ttm_destroy)
        return true;

    return false;
}

#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
                      struct ttm_mem_type_manager *man)
{
    switch (type) {
    case TTM_PL_SYSTEM:
        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_MASK_CACHING;
        man->default_caching = TTM_PL_FLAG_CACHED;
        break;
    case TTM_PL_VRAM:
        man->func = &ttm_bo_manager_func;
        man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
        man->default_caching = TTM_PL_FLAG_WC;
        break;
    default:
        DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
        return -EINVAL;
    }

    return 0;
}
#endif

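/**
 * Eviction callback: when TTM needs to evict one of our buffer objects,
 * request that it be placed in system memory.
 */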
static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
    struct vbox_bo *vboxbo = vbox_bo(bo);

    if (!vbox_ttm_bo_is_vbox_bo(bo))
        return;

    vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_SYSTEM);
    *pl = vboxbo->placement;
}

#if RTLNX_VER_MAX(5,14,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
    return 0;
}
#endif

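/*
 * io_mem_reserve tells TTM how a buffer's backing store is mapped for CPU
 * access: system memory needs nothing special, while VRAM is an I/O range
 * located at BAR 0 of the virtual graphics device.  The variants below
 * match the evolving kernel/TTM interfaces.
 */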
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
{
    struct vbox_private *vbox = vbox_bdev(bdev);
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

    mem->bus.addr = NULL;
    mem->bus.offset = 0;
    mem->bus.size = mem->num_pages << PAGE_SHIFT;
    mem->bus.base = 0;
    mem->bus.is_iomem = false;
    if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
        return -EINVAL;
    switch (mem->mem_type) {
    case TTM_PL_SYSTEM:
        /* system memory */
        return 0;
    case TTM_PL_VRAM:
        mem->bus.offset = mem->start << PAGE_SHIFT;
        mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
        mem->bus.is_iomem = true;
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
#else
# if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_resource *mem)
# else /* > 5.13.0 */
static int vbox_ttm_io_mem_reserve(struct ttm_device *bdev,
                                   struct ttm_resource *mem)
# endif /* > 5.13.0 */
{
    struct vbox_private *vbox = vbox_bdev(bdev);

    mem->bus.addr = NULL;
    mem->bus.offset = 0;
# if RTLNX_VER_MAX(5,12,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
    mem->size = mem->num_pages << PAGE_SHIFT;
# endif
    mem->start = 0;
    mem->bus.is_iomem = false;
    switch (mem->mem_type) {
    case TTM_PL_SYSTEM:
        /* system memory */
        return 0;
    case TTM_PL_VRAM:
# if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        mem->bus.caching = ttm_write_combined;
# endif
# if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        mem->bus.offset = (mem->start << PAGE_SHIFT) +
                          pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0);
# else
        mem->bus.offset = mem->start << PAGE_SHIFT;
        mem->start = pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0);
# endif
        mem->bus.is_iomem = true;
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
#endif

#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static void vbox_ttm_io_mem_free(struct ttm_device *bdev,
                                 struct ttm_resource *mem)
{
}
#elif RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_resource *mem)
{
}
#else
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_mem_reg *mem)
{
}
#endif

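/*
 * ttm_tt teardown.  Kernels 5.10 and newer pass the device to a destroy
 * callback; older kernels instead attach a ttm_backend_func with a destroy
 * hook to each ttm_tt.
 */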
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static void vbox_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
    ttm_tt_fini(tt);
    kfree(tt);
}
#elif RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static void vbox_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
{
    ttm_tt_fini(tt);
    kfree(tt);
}
#else
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
    ttm_tt_fini(tt);
    kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
    .destroy = &vbox_ttm_backend_destroy,
};
#endif

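/**
 * Allocates and initialises a ttm_tt for a buffer object, picking the
 * ttm_tt_init() signature that matches the running kernel.
 */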
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                                         unsigned long size,
                                         u32 page_flags,
                                         struct page *dummy_read_page)
#else
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
                                         u32 page_flags)
#endif
{
    struct ttm_tt *tt;

    tt = kzalloc(sizeof(*tt), GFP_KERNEL);
    if (!tt)
        return NULL;

#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
    tt->func = &vbox_tt_backend_func;
#endif
#if RTLNX_VER_MIN(5,19,0) || RTLNX_RHEL_RANGE(8,8, 8,99) || RTLNX_RHEL_MAJ_PREREQ(9,2) || RTLNX_SUSE_MAJ_PREREQ(15,5)
    if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined, 0)) {
#elif RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
    if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined)) {
#elif RTLNX_VER_MIN(4,17,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
    if (ttm_tt_init(tt, bo, page_flags)) {
#else
    if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
#endif
        kfree(tt);
        return NULL;
    }

    return tt;
}

#if RTLNX_VER_MAX(4,17,0)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
    return ttm_pool_populate(ttm);
}
# else
static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
                                struct ttm_operation_ctx *ctx)
{
    return ttm_pool_populate(ttm, ctx);
}
# endif

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
    ttm_pool_unpopulate(ttm);
}
#endif

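/*
 * Move callback for kernels 5.11 and newer: all moves are done with a plain
 * memcpy between the old and new backing store; no copy engine is involved.
 */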
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static int vbox_bo_move(struct ttm_buffer_object *bo, bool evict,
                        struct ttm_operation_ctx *ctx,
                        struct ttm_resource *new_mem,
                        struct ttm_place *hop)
{
    return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
#endif

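/*
 * The TTM driver vtable.  Most entries are version-gated because the
 * structure (and even its name) changed repeatedly across kernel releases.
 */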
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static struct ttm_device_funcs vbox_bo_driver = {
#else /* < 5.13.0 */
static struct ttm_bo_driver vbox_bo_driver = {
#endif /* < 5.13.0 */
    .ttm_tt_create = vbox_ttm_tt_create,
#if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
    .ttm_tt_destroy = vbox_ttm_tt_destroy,
#endif
#if RTLNX_VER_MAX(4,17,0)
    .ttm_tt_populate = vbox_ttm_tt_populate,
    .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
#endif
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
    .init_mem_type = vbox_bo_init_mem_type,
#endif
#if RTLNX_VER_MIN(4,10,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)
    .eviction_valuable = ttm_bo_eviction_valuable,
#endif
    .evict_flags = vbox_bo_evict_flags,
#if RTLNX_VER_MAX(5,14,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
    .verify_access = vbox_bo_verify_access,
#endif
    .io_mem_reserve = &vbox_ttm_io_mem_reserve,
    .io_mem_free = &vbox_ttm_io_mem_free,
#if RTLNX_VER_MIN(4,12,0) || RTLNX_RHEL_MAJ_PREREQ(7,5)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
    .io_mem_pfn = ttm_bo_default_io_mem_pfn,
# endif
#endif
#if (RTLNX_VER_RANGE(4,7,0, 4,11,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)) && !RTLNX_RHEL_MAJ_PREREQ(7,5)
    .lru_tail = &ttm_bo_default_lru_tail,
    .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
#endif
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
    .move = &vbox_bo_move,
#endif
};

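/**
 * Initialises the TTM memory manager: creates the TTM device, sets up a
 * range manager covering the available VRAM, and marks the framebuffer
 * aperture write-combined via MTRR where the kernel provides that API.
 */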
int vbox_mm_init(struct vbox_private *vbox)
{
    int ret;
    struct drm_device *dev = vbox->dev;
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
    struct ttm_device *bdev = &vbox->ttm.bdev;
#else
    struct ttm_bo_device *bdev = &vbox->ttm.bdev;
#endif

#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
    ret = vbox_ttm_global_init(vbox);
    if (ret)
        return ret;
#endif
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
    ret = ttm_device_init(&vbox->ttm.bdev,
#else
    ret = ttm_bo_device_init(&vbox->ttm.bdev,
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
                             vbox->ttm.bo_global_ref.ref.object,
#endif
                             &vbox_bo_driver,
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                             dev->dev,
#endif
#if RTLNX_VER_MIN(3,15,0) || RTLNX_RHEL_MAJ_PREREQ(7,1)
                             dev->anon_inode->i_mapping,
#endif
#if RTLNX_VER_MIN(5,5,0) || RTLNX_RHEL_MIN(8,3) || RTLNX_SUSE_MAJ_PREREQ(15,3)
                             dev->vma_offset_manager,
#elif RTLNX_VER_MAX(5,2,0) && !RTLNX_RHEL_MAJ_PREREQ(8,2)
                             DRM_FILE_PAGE_OFFSET,
#endif
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                             false,
#endif
                             true);
    if (ret) {
        DRM_ERROR("Error initialising bo driver; %d\n", ret);
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
        goto err_ttm_global_release;
#else
        return ret;
#endif
    }

#if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
    ret = ttm_range_man_init(bdev, TTM_PL_VRAM, false,
                             vbox->available_vram_size >> PAGE_SHIFT);
#else
    ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                         vbox->available_vram_size >> PAGE_SHIFT);
#endif
    if (ret) {
        DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
        goto err_device_release;
    }

#ifdef DRM_MTRR_WC
    vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                 pci_resource_len(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                 DRM_MTRR_WC);
#else
    vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                     pci_resource_len(VBOX_DRM_TO_PCI_DEV(dev), 0));
#endif
    return 0;

err_device_release:
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
    ttm_device_fini(&vbox->ttm.bdev);
#else
    ttm_bo_device_release(&vbox->ttm.bdev);
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
err_ttm_global_release:
    vbox_ttm_global_release(vbox);
#endif
    return ret;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
    drm_mtrr_del(vbox->fb_mtrr,
                 pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0),
                 pci_resource_len(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0), DRM_MTRR_WC);
#else
    arch_phys_wc_del(vbox->fb_mtrr);
#endif
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
    ttm_device_fini(&vbox->ttm.bdev);
#else
    ttm_bo_device_release(&vbox->ttm.bdev);
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
    vbox_ttm_global_release(vbox);
#endif
}

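/**
 * Fills in the TTM placement list for a buffer object.  @mem_type is a mask
 * of VBOX_MEM_TYPE_VRAM and/or VBOX_MEM_TYPE_SYSTEM; if neither is given,
 * system memory is used as the fallback placement.
 */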
void vbox_ttm_placement(struct vbox_bo *bo, u32 mem_type)
{
    u32 c = 0;
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
    bo->placement.fpfn = 0;
    bo->placement.lpfn = 0;
#else
    unsigned int i;
#endif

    bo->placement.placement = bo->placements;
    bo->placement.busy_placement = bo->placements;

    if (mem_type & VBOX_MEM_TYPE_VRAM) {
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        bo->placements[c].mem_type = TTM_PL_VRAM;
        PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
        bo->placements[c].mem_type = TTM_PL_VRAM;
        PLACEMENT_FLAGS(bo->placements[c++]) =
            TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
#else
        PLACEMENT_FLAGS(bo->placements[c++]) =
            TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
#endif
    }
    if (mem_type & VBOX_MEM_TYPE_SYSTEM) {
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        bo->placements[c].mem_type = TTM_PL_SYSTEM;
        PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
        bo->placements[c].mem_type = TTM_PL_SYSTEM;
        PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_MASK_CACHING;
#else
        PLACEMENT_FLAGS(bo->placements[c++]) =
            TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
    }
    if (!c) {
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        bo->placements[c].mem_type = TTM_PL_SYSTEM;
        PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
        bo->placements[c].mem_type = TTM_PL_SYSTEM;
        PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_MASK_CACHING;
#else
        PLACEMENT_FLAGS(bo->placements[c++]) =
            TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
    }

    bo->placement.num_placement = c;
    bo->placement.num_busy_placement = c;

#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
    for (i = 0; i < c; ++i) {
        bo->placements[i].fpfn = 0;
        bo->placements[i].lpfn = 0;
    }
#endif
}

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static const struct drm_gem_object_funcs vbox_drm_gem_object_funcs = {
    .free = vbox_gem_free_object,
    .print_info = drm_gem_ttm_print_info,
# if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
    .mmap = drm_gem_ttm_mmap,
# endif
};
#endif

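/**
 * Creates a vbox_bo: initialises the embedded GEM object, chooses VRAM with
 * a system-memory fallback as the placement, and hands the object to TTM.
 */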
int vbox_bo_create(struct drm_device *dev, int size, int align,
                   u32 flags, struct vbox_bo **pvboxbo)
{
    struct vbox_private *vbox = dev->dev_private;
    struct vbox_bo *vboxbo;
#if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
    size_t acc_size;
#endif
    int ret;

    vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
    if (!vboxbo)
        return -ENOMEM;

    ret = drm_gem_object_init(dev, &vboxbo->gem, size);
    if (ret)
        goto err_free_vboxbo;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
    if (!vboxbo->gem.funcs) {
        vboxbo->gem.funcs = &vbox_drm_gem_object_funcs;
    }
#endif
    vboxbo->bo.bdev = &vbox->ttm.bdev;
#if RTLNX_VER_MAX(3,15,0) && !RTLNX_RHEL_MAJ_PREREQ(7,1)
    vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

    vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_VRAM | VBOX_MEM_TYPE_SYSTEM);

#if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
    acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                   sizeof(struct vbox_bo));
#endif

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
    /* Initialization of the following was removed from the DRM stack
     * in 5.14, so we need to do it manually. */
    vboxbo->bo.base.funcs = &vbox_drm_gem_object_funcs;
    kref_init(&vboxbo->bo.base.refcount);
    vboxbo->bo.base.size = size;
    vboxbo->bo.base.dev = dev;
    dma_resv_init(&vboxbo->bo.base._resv);
    drm_vma_node_reset(&vboxbo->bo.base.vma_node);
#endif

#if RTLNX_VER_MIN(6,1,0) || RTLNX_RHEL_MAJ_PREREQ(9,3) || RTLNX_SUSE_MAJ_PREREQ(15,5)
    ret = ttm_bo_init_validate(&vbox->ttm.bdev, &vboxbo->bo,
#else
    ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
#endif /* < 6.1.0 */
                              ttm_bo_type_device, &vboxbo->placement,
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
                              align >> PAGE_SHIFT, false, NULL, acc_size,
#elif RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99) /* < 5.13.0, < RHEL(8.6, 8.99) */
                              align >> PAGE_SHIFT, false, acc_size,
#else /* > 5.13.0 */
                              align >> PAGE_SHIFT, false,
#endif /* > 5.13.0 */
#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
                              NULL, NULL, vbox_bo_ttm_destroy);
#else
                              NULL, vbox_bo_ttm_destroy);
#endif
    if (ret) {
        /* In case of failure, ttm_bo_init() is supposed to call
         * vbox_bo_ttm_destroy(), which in turn will free @vboxbo. */
        goto err_exit;
    }

    *pvboxbo = vboxbo;

    return 0;

err_free_vboxbo:
    kfree(vboxbo);
err_exit:
    return ret;
}

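/** Returns the buffer object's current offset within video memory, in bytes. */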
static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
    return bo->bo.resource->start << PAGE_SHIFT;
#elif RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
    return bo->bo.mem.start << PAGE_SHIFT;
#else
    return bo->bo.offset;
#endif
}

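/**
 * Pins a buffer object into @mem_type, validating it into that placement on
 * the first pin; nested pins only increment the pin count.  On success the
 * GPU offset is returned through @gpu_addr when it is non-NULL.
 */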
int vbox_bo_pin(struct vbox_bo *bo, u32 mem_type, u64 *gpu_addr)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
    struct ttm_operation_ctx ctx = { false, false };
#endif
    int ret;
#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
    int i;
#endif

    if (bo->pin_count) {
        bo->pin_count++;
        if (gpu_addr)
            *gpu_addr = vbox_bo_gpu_offset(bo);

        return 0;
    }

    vbox_ttm_placement(bo, mem_type);

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
    for (i = 0; i < bo->placement.num_placement; i++)
        PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
#endif

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
    ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#else
    ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
    if (ret)
        return ret;

    bo->pin_count = 1;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
    ttm_bo_pin(&bo->bo);
#endif

    if (gpu_addr)
        *gpu_addr = vbox_bo_gpu_offset(bo);

    return 0;
}

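/**
 * Drops one pin reference; when the count reaches zero the buffer object
 * becomes evictable again.
 */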
int vbox_bo_unpin(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
# if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
    struct ttm_operation_ctx ctx = { false, false };
# endif
#endif
    int ret = 0;
#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
    int i;
#endif

    if (!bo->pin_count) {
        DRM_ERROR("unpin bad %p\n", bo);
        return 0;
    }
    bo->pin_count--;
    if (bo->pin_count)
        return 0;

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
    for (i = 0; i < bo->placement.num_placement; i++)
        PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;
#endif

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
    ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#elif RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
    ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
    if (ret)
        return ret;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
    ttm_bo_unpin(&bo->bo);
#endif

    return 0;
}

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned. The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
# if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
    struct ttm_operation_ctx ctx = { false, false };
# endif
    int i, ret;

    if (!bo->pin_count) {
        DRM_ERROR("unpin bad %p\n", bo);
        return 0;
    }
    bo->pin_count--;
    if (bo->pin_count)
        return 0;

    if (bo->kmap.virtual)
        ttm_bo_kunmap(&bo->kmap);

    vbox_ttm_placement(bo, VBOX_MEM_TYPE_SYSTEM);

    for (i = 0; i < bo->placement.num_placement; i++)
        PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
    ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
# else
    ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
# endif
    if (ret) {
        DRM_ERROR("pushing to system memory failed\n");
        return ret;
    }

    return 0;
}
#endif

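/**
 * mmap file operation: delegates to the GEM or TTM mmap helper appropriate
 * for the running kernel.
 */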
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct drm_file *file_priv;
    struct vbox_private *vbox;
    int ret = -EINVAL;

    if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
        return -EINVAL;

    file_priv = filp->private_data;
    vbox = file_priv->minor->dev->dev_private;

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
    (void)vbox;
    if (drm_dev_is_unplugged(file_priv->minor->dev))
        return -ENODEV;
    ret = drm_gem_mmap(filp, vma);
#else
    ret = ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
#endif
    return ret;
}