VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_ttm.c @ 96407

Last change on this file since 96407 was 96407, checked in by vboxsync, 2 years ago:

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.5 KB
/* $Id: vbox_ttm.c 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2022 Oracle and/or its affiliates.
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Michael Thayer <michael.thayer@oracle.com>
 */
#include "vbox_drv.h"
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_MAJ_PREREQ(8,5)
# include <drm/drm_gem.h>
# include <drm/drm_gem_ttm_helper.h>
# include <drm/drm_gem_vram_helper.h>
#else
# include <drm/ttm/ttm_page_alloc.h>
#endif

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
# include <drm/ttm/ttm_range_manager.h>
#endif

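/*
 * Up to 3.18 a TTM placement entry was a bare flags word; later kernels
 * turned it into a struct with a .flags member.  PLACEMENT_FLAGS() hides
 * the difference for the code below.
 */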
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
#define PLACEMENT_FLAGS(placement) (placement)
#else
#define PLACEMENT_FLAGS(placement) ((placement).flags)
#endif

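/*
 * TTM callbacks receive only the (ttm_)bo_device pointer, so recover the
 * driver instance from the TTM device embedded in struct vbox_private.
 */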
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static inline struct vbox_private *vbox_bdev(struct ttm_device *bd)
#else
static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
#endif
{
        return container_of(bd, struct vbox_private, ttm.bdev);
}

#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
        struct drm_global_reference *global_ref;
        int ret;

#if RTLNX_VER_MAX(5,0,0)
        global_ref = &vbox->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &vbox_ttm_mem_global_init;
        global_ref->release = &vbox_ttm_mem_global_release;
        ret = drm_global_item_ref(global_ref);
        if (ret) {
                DRM_ERROR("Failed setting up TTM memory subsystem.\n");
                return ret;
        }

        vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
#endif
        global_ref = &vbox->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (ret) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
#if RTLNX_VER_MAX(5,0,0)
                drm_global_item_unref(&vbox->ttm.mem_global_ref);
#endif
                return ret;
        }

        return 0;
}

/**
 * Removes the vbox memory manager object from the global memory manager.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
        drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
}
#endif

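/*
 * Final destructor, invoked by TTM when the last reference to the buffer
 * object is dropped: release the embedded GEM object, then free the
 * containing vbox_bo itself.
 */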
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct vbox_bo *bo;

        bo = container_of(tbo, struct vbox_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &vbox_bo_ttm_destroy)
                return true;

        return false;
}

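/*
 * Pre-5.10 kernels ask the driver to describe each memory domain up
 * front.  We expose two: ordinary cached system pages, and VRAM as a
 * fixed, mappable, preferably write-combined aperture handled by the
 * TTM range manager.
 */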
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }

        return 0;
}
#endif

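/*
 * Eviction policy: whenever TTM needs to make room, a vbox buffer
 * object may always be migrated out of VRAM into system memory.
 */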
static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct vbox_bo *vboxbo = vbox_bo(bo);

        if (!vbox_ttm_bo_is_vbox_bo(bo))
                return;

        vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_SYSTEM);
        *pl = vboxbo->placement;
}

#if RTLNX_VER_MAX(5,14,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
        return 0;
}
#endif

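/*
 * Tell TTM how a resource can be mapped by the CPU.  System memory
 * needs no I/O setup; VRAM resolves to an offset within PCI BAR 0 of
 * the virtual graphics adapter and is marked write-combined where the
 * interface allows expressing that.
 */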
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
{
        struct vbox_private *vbox = vbox_bdev(bdev);
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
#else
# if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_resource *mem)
# else /* >= 5.13.0 */
static int vbox_ttm_io_mem_reserve(struct ttm_device *bdev,
                                   struct ttm_resource *mem)
# endif /* >= 5.13.0 */
{
        struct vbox_private *vbox = vbox_bdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
# if RTLNX_VER_MAX(5,12,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        mem->size = mem->num_pages << PAGE_SHIFT;
# endif
        mem->start = 0;
        mem->bus.is_iomem = false;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
# if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                mem->bus.caching = ttm_write_combined;
# endif
# if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                mem->bus.offset = (mem->start << PAGE_SHIFT) +
                                  pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0);
# else
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->start = pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0);
# endif
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
#endif

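/* Nothing was reserved in io_mem_reserve() that needs undoing, so the
 * io_mem_free() hook is an empty stub in all interface variants. */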
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static void vbox_ttm_io_mem_free(struct ttm_device *bdev,
                                 struct ttm_resource *mem)
{
}
#elif RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_resource *mem)
{
}
#else
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_mem_reg *mem)
{
}
#endif

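/* Tear down a ttm_tt allocated by vbox_ttm_tt_create() below: finalise
 * it and free the allocation.  Pre-5.10 kernels reach this through the
 * backend function table instead of a device callback. */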
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static void vbox_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}
#elif RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static void vbox_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}
#else
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
        .destroy = &vbox_ttm_backend_destroy,
};
#endif

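/*
 * Allocate and initialise the ttm_tt object that provides the backing
 * pages for a buffer object.  Newer kernels take the caching mode
 * (write-combined here) at init time; older ones attach a backend
 * function table to the tt instead.
 */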
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                                         unsigned long size,
                                         u32 page_flags,
                                         struct page *dummy_read_page)
#else
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
                                         u32 page_flags)
#endif
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
        tt->func = &vbox_tt_backend_func;
#endif
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
#elif RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
        if (ttm_tt_init(tt, bo, page_flags)) {
#elif RTLNX_VER_MAX(5,19,0)
        if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined)) {
#else
        if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined, 0)) {
#endif
                kfree(tt);
                return NULL;
        }

        return tt;
}

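/*
 * Up to 4.17 the driver has to wire up the page-pool populate and
 * unpopulate helpers itself; newer kernels appear to handle this in the
 * TTM core, so the hooks are omitted there.
 */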
#if RTLNX_VER_MAX(4,17,0)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}
# else
static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
                                struct ttm_operation_ctx *ctx)
{
        return ttm_pool_populate(ttm, ctx);
}
# endif

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}
#endif

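/*
 * From 5.11 on, a driver must supply its own move callback; delegating
 * to ttm_bo_move_memcpy(), i.e. a CPU copy between the two placements,
 * is sufficient for this driver.
 */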
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static int vbox_bo_move(struct ttm_buffer_object *bo, bool evict,
                        struct ttm_operation_ctx *ctx,
                        struct ttm_resource *new_mem,
                        struct ttm_place *hop)
{
        return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
#endif

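/*
 * The TTM driver function table.  Nearly every entry is conditional,
 * tracking the steady stream of TTM interface changes between kernel
 * 4.x and 5.19 and the corresponding RHEL/SUSE backports.
 */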
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
static struct ttm_device_funcs vbox_bo_driver = {
#else /* < 5.13.0 */
static struct ttm_bo_driver vbox_bo_driver = {
#endif /* < 5.13.0 */
        .ttm_tt_create = vbox_ttm_tt_create,
#if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        .ttm_tt_destroy = vbox_ttm_tt_destroy,
#endif
#if RTLNX_VER_MAX(4,17,0)
        .ttm_tt_populate = vbox_ttm_tt_populate,
        .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
#endif
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        .init_mem_type = vbox_bo_init_mem_type,
#endif
#if RTLNX_VER_MIN(4,10,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)
        .eviction_valuable = ttm_bo_eviction_valuable,
#endif
        .evict_flags = vbox_bo_evict_flags,
#if RTLNX_VER_MAX(5,14,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
        .verify_access = vbox_bo_verify_access,
#endif
        .io_mem_reserve = &vbox_ttm_io_mem_reserve,
        .io_mem_free = &vbox_ttm_io_mem_free,
#if RTLNX_VER_MIN(4,12,0) || RTLNX_RHEL_MAJ_PREREQ(7,5)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,
# endif
#endif
#if (RTLNX_VER_RANGE(4,7,0, 4,11,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)) && !RTLNX_RHEL_MAJ_PREREQ(7,5)
        .lru_tail = &ttm_bo_default_lru_tail,
        .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
#endif
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        .move = &vbox_bo_move,
#endif
};

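/*
 * Set up the memory manager: create the TTM device, register a range
 * manager covering the VRAM the host makes available, and request
 * write-combining for the framebuffer BAR via MTRR/PAT.
 * vbox_mm_fini() below unwinds the same steps.
 */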
int vbox_mm_init(struct vbox_private *vbox)
{
        int ret;
        struct drm_device *dev = vbox->dev;
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        struct ttm_device *bdev = &vbox->ttm.bdev;
#else
        struct ttm_bo_device *bdev = &vbox->ttm.bdev;
#endif

#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
        ret = vbox_ttm_global_init(vbox);
        if (ret)
                return ret;
#endif
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        ret = ttm_device_init(&vbox->ttm.bdev,
#else
        ret = ttm_bo_device_init(&vbox->ttm.bdev,
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
                                 vbox->ttm.bo_global_ref.ref.object,
#endif
                                 &vbox_bo_driver,
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                                 dev->dev,
#endif
#if RTLNX_VER_MIN(3,15,0) || RTLNX_RHEL_MAJ_PREREQ(7,1)
                                 dev->anon_inode->i_mapping,
#endif
#if RTLNX_VER_MIN(5,5,0) || RTLNX_RHEL_MIN(8,3) || RTLNX_SUSE_MAJ_PREREQ(15,3)
                                 dev->vma_offset_manager,
#elif RTLNX_VER_MAX(5,2,0) && !RTLNX_RHEL_MAJ_PREREQ(8,2)
                                 DRM_FILE_PAGE_OFFSET,
#endif
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                                 false,
#endif
                                 true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver; %d\n", ret);
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
                goto err_ttm_global_release;
#else
                return ret;
#endif
        }

#if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ret = ttm_range_man_init(bdev, TTM_PL_VRAM, false,
                                 vbox->available_vram_size >> PAGE_SHIFT);
#else
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             vbox->available_vram_size >> PAGE_SHIFT);
#endif
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                goto err_device_release;
        }

#ifdef DRM_MTRR_WC
        vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                     pci_resource_len(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                     DRM_MTRR_WC);
#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                         pci_resource_len(VBOX_DRM_TO_PCI_DEV(dev), 0));
#endif
        return 0;

err_device_release:
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        ttm_device_fini(&vbox->ttm.bdev);
#else
        ttm_bo_device_release(&vbox->ttm.bdev);
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
err_ttm_global_release:
        vbox_ttm_global_release(vbox);
#endif
        return ret;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
        drm_mtrr_del(vbox->fb_mtrr,
                     pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0),
                     pci_resource_len(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0), DRM_MTRR_WC);
#else
        arch_phys_wc_del(vbox->fb_mtrr);
#endif
#if RTLNX_VER_MIN(5,13,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        ttm_device_fini(&vbox->ttm.bdev);
#else
        ttm_bo_device_release(&vbox->ttm.bdev);
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
        vbox_ttm_global_release(vbox);
#endif
}

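/*
 * Rebuild the placement list of a buffer object from a VBOX_MEM_TYPE_*
 * mask.  Callers may ask for VRAM, system memory or both; if the mask
 * selects nothing, fall back to system memory rather than leaving the
 * list empty.  fpfn/lpfn stay 0, i.e. "anywhere within the domain".
 */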
void vbox_ttm_placement(struct vbox_bo *bo, u32 mem_type)
{
        u32 c = 0;
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
        bo->placement.fpfn = 0;
        bo->placement.lpfn = 0;
#else
        unsigned int i;
#endif

        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;

        if (mem_type & VBOX_MEM_TYPE_VRAM) {
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_VRAM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_VRAM;
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
#endif
        }
        if (mem_type & VBOX_MEM_TYPE_SYSTEM) {
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
        }
        if (!c) {
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
        }

        bo->placement.num_placement = c;
        bo->placement.num_busy_placement = c;

#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
        for (i = 0; i < c; ++i) {
                bo->placements[i].fpfn = 0;
                bo->placements[i].lpfn = 0;
        }
#endif
}

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static const struct drm_gem_object_funcs vbox_drm_gem_object_funcs = {
        .free = vbox_gem_free_object,
        .print_info = drm_gem_ttm_print_info,
# if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        .mmap = drm_gem_ttm_mmap,
# endif
};
#endif

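/*
 * Allocate and initialise a vbox_bo: a GEM object wrapped around a TTM
 * buffer object that can live in VRAM or system memory.  A minimal
 * usage sketch (illustrative only, not taken from this file):
 *
 *      struct vbox_bo *bo;
 *      int rc = vbox_bo_create(dev, size, PAGE_SIZE, 0, &bo);
 *
 * On success, ownership passes to TTM, which calls vbox_bo_ttm_destroy()
 * once the last reference to the object is dropped.
 */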
int vbox_bo_create(struct drm_device *dev, int size, int align,
                   u32 flags, struct vbox_bo **pvboxbo)
{
        struct vbox_private *vbox = dev->dev_private;
        struct vbox_bo *vboxbo;
#if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
        size_t acc_size;
#endif
        int ret;

        vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
        if (!vboxbo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &vboxbo->gem, size);
        if (ret)
                goto err_free_vboxbo;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        if (!vboxbo->gem.funcs)
                vboxbo->gem.funcs = &vbox_drm_gem_object_funcs;
#endif
        vboxbo->bo.bdev = &vbox->ttm.bdev;
#if RTLNX_VER_MAX(3,15,0) && !RTLNX_RHEL_MAJ_PREREQ(7,1)
        vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

        vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_VRAM | VBOX_MEM_TYPE_SYSTEM);

#if RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99)
        acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                       sizeof(struct vbox_bo));
#endif

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        /* Initialization of the following was removed from the DRM stack
         * in 5.14, so we need to do it manually. */
        vboxbo->bo.base.funcs = &vbox_drm_gem_object_funcs;
        kref_init(&vboxbo->bo.base.refcount);
        vboxbo->bo.base.size = size;
        vboxbo->bo.base.dev = dev;
        dma_resv_init(&vboxbo->bo.base._resv);
        drm_vma_node_reset(&vboxbo->bo.base.vma_node);
#endif

        ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
                          ttm_bo_type_device, &vboxbo->placement,
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
                          align >> PAGE_SHIFT, false, NULL, acc_size,
#elif RTLNX_VER_MAX(5,13,0) && !RTLNX_RHEL_RANGE(8,6, 8,99) /* < 5.13.0, < RHEL(8.6, 8.99) */
                          align >> PAGE_SHIFT, false, acc_size,
#else /* >= 5.13.0 */
                          align >> PAGE_SHIFT, false,
#endif /* >= 5.13.0 */
#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
                          NULL, NULL, vbox_bo_ttm_destroy);
#else
                          NULL, vbox_bo_ttm_destroy);
#endif
        if (ret) {
                /* On failure, ttm_bo_init() is expected to call
                 * vbox_bo_ttm_destroy(), which in turn frees @vboxbo. */
                goto err_exit;
        }

        *pvboxbo = vboxbo;

        return 0;

err_free_vboxbo:
        kfree(vboxbo);
err_exit:
        return ret;
}

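/*
 * Offset of the buffer object within its memory domain; for a VRAM
 * object this is the byte offset into the VRAM aperture, which is what
 * the virtual graphics hardware is programmed with.
 */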
static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        return bo->bo.resource->start << PAGE_SHIFT;
#elif RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
        return bo->bo.mem.start << PAGE_SHIFT;
#else
        return bo->bo.offset;
#endif
}

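/*
 * Pin a buffer object into the given memory domain and optionally
 * return its GPU offset.  Pins nest: each vbox_bo_pin() must be paired
 * with a vbox_bo_unpin(), and only the last unpin makes the object
 * evictable again.  Typical pairing (illustrative only):
 *
 *      ret = vbox_bo_pin(bo, VBOX_MEM_TYPE_VRAM, &gpu_addr);
 *      ...program scanout to gpu_addr...
 *      vbox_bo_unpin(bo);
 */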
int vbox_bo_pin(struct vbox_bo *bo, u32 mem_type, u64 *gpu_addr)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
        struct ttm_operation_ctx ctx = { false, false };
#endif
        int ret;
#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        int i;
#endif

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = vbox_bo_gpu_offset(bo);

                return 0;
        }

        vbox_ttm_placement(bo, mem_type);

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
#endif

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#else
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
        if (ret)
                return ret;

        bo->pin_count = 1;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ttm_bo_pin(&bo->bo);
#endif

        if (gpu_addr)
                *gpu_addr = vbox_bo_gpu_offset(bo);

        return 0;
}

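/*
 * Drop one pin reference.  On the last unpin, clear the no-evict state
 * (pre-5.11) or the TTM pin count (5.11 and later) so that the object
 * can be migrated again.
 */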
int vbox_bo_unpin(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
# if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        struct ttm_operation_ctx ctx = { false, false };
# endif
#endif
        int ret = 0;
#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        int i;
#endif

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;
#endif

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#elif RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
        if (ret)
                return ret;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ttm_bo_unpin(&bo->bo);
#endif

        return 0;
}

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned.  The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
# if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
        struct ttm_operation_ctx ctx = { false, false };
# endif
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        vbox_ttm_placement(bo, VBOX_MEM_TYPE_SYSTEM);

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
# else
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
# endif
        if (ret) {
                DRM_ERROR("pushing to system memory failed\n");
                return ret;
        }

        return 0;
}
#endif

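/*
 * mmap entry point for the device node.  From 5.14 on, buffer mappings
 * go through the GEM mmap-offset machinery; older kernels hand the VMA
 * straight to TTM.
 */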
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vbox_private *vbox;
        int ret = -EINVAL;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        vbox = file_priv->minor->dev->dev_private;

#if RTLNX_VER_MIN(5,14,0) || RTLNX_RHEL_RANGE(8,6, 8,99)
        (void)vbox;
        if (drm_dev_is_unplugged(file_priv->minor->dev))
                return -ENODEV;
        ret = drm_gem_mmap(filp, vma);
#else
        ret = ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
#endif
        return ret;
}