VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_ttm.c@93228

Last change on this file since 93228 was 93115, checked in by vboxsync, 3 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.8 KB
/* $Id: vbox_ttm.c 93115 2022-01-01 11:31:46Z vboxsync $ */
/** @file
 * VirtualBox Additions Linux kernel video driver
 */

/*
 * Copyright (C) 2013-2022 Oracle Corporation
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Michael Thayer <michael.thayer@oracle.com>
 */
#include "vbox_drv.h"
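
/*
 * The TTM headers we need moved around over time: 5.11 (and the RHEL 8.5
 * backport) replaced the old TTM page allocator with the GEM TTM/VRAM
 * helpers, and 5.14 split the range manager out into its own header.
 */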
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_MAJ_PREREQ(8,5)
# include <drm/drm_gem.h>
# include <drm/drm_gem_ttm_helper.h>
# include <drm/drm_gem_vram_helper.h>
#else
# include <drm/ttm/ttm_page_alloc.h>
#endif

#if RTLNX_VER_MIN(5,14,0)
# include <drm/ttm/ttm_range_manager.h>
#endif

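/*
 * Kernels before 3.18 (without the RHEL 7.2 backport) store placement flags
 * as plain integers in the placements array; later kernels wrap each entry
 * in struct ttm_place, so the flags live in a .flags member.
 */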
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
#define PLACEMENT_FLAGS(placement) (placement)
#else
#define PLACEMENT_FLAGS(placement) ((placement).flags)
#endif


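/*
 * Recover our driver-private structure from the TTM device embedded in it.
 * The TTM device type was renamed from ttm_bo_device to ttm_device in 5.13.
 */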
#if RTLNX_VER_MIN(5,13,0)
static inline struct vbox_private *vbox_bdev(struct ttm_device *bd)
#else
static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
#endif
{
        return container_of(bd, struct vbox_private, ttm.bdev);
}

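/*
 * Kernels before 5.0 (without the RHEL 7.7/8.1 backports) require each
 * driver to take explicit references on the global TTM memory and BO state
 * via the drm_global mechanism; newer kernels manage this internally.
 */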
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
        struct drm_global_reference *global_ref;
        int ret;

#if RTLNX_VER_MAX(5,0,0)
        global_ref = &vbox->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &vbox_ttm_mem_global_init;
        global_ref->release = &vbox_ttm_mem_global_release;
        ret = drm_global_item_ref(global_ref);
        if (ret) {
                DRM_ERROR("Failed setting up TTM memory subsystem.\n");
                return ret;
        }

        vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
#endif
        global_ref = &vbox->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (ret) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
#if RTLNX_VER_MAX(5,0,0)
                drm_global_item_unref(&vbox->ttm.mem_global_ref);
#endif
                return ret;
        }

        return 0;
}

/**
 * Removes the vbox memory manager object from the global memory manager.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
        drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
}
#endif

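/*
 * Final-release callback handed to TTM at BO creation time: releases the
 * embedded GEM object and frees the containing vbox_bo.
 */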
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct vbox_bo *bo;

        bo = container_of(tbo, struct vbox_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &vbox_bo_ttm_destroy)
                return true;

        return false;
}

#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }

        return 0;
}
#endif

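/*
 * When TTM needs to evict one of our buffers, steer it into system memory;
 * foreign buffer objects are left untouched.
 */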
static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct vbox_bo *vboxbo = vbox_bo(bo);

        if (!vbox_ttm_bo_is_vbox_bo(bo))
                return;

        vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_SYSTEM);
        *pl = vboxbo->placement;
}

#if RTLNX_VER_MAX(5,14,0)
static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
                                 struct file *filp)
{
        return 0;
}
#endif

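/*
 * Translate a buffer placement into something mappable: system memory needs
 * no setup, while VRAM maps onto PCI BAR 0.  Note the layout change at 5.10:
 * older kernels split the address into bus.base + bus.offset in ttm_mem_reg,
 * whereas newer ones carry a single absolute bus.offset in ttm_resource.
 */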
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem)
{
        struct vbox_private *vbox = vbox_bdev(bdev);
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
#else
# if RTLNX_VER_MAX(5,13,0)
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                   struct ttm_resource *mem)
# else /* >= 5.13.0 */
static int vbox_ttm_io_mem_reserve(struct ttm_device *bdev,
                                   struct ttm_resource *mem)
# endif /* >= 5.13.0 */
{
        struct vbox_private *vbox = vbox_bdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
# if RTLNX_VER_MAX(5,12,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        mem->size = mem->num_pages << PAGE_SHIFT;
# endif
        mem->start = 0;
        mem->bus.is_iomem = false;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
# if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                mem->bus.caching = ttm_write_combined;
# endif
# if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                mem->bus.offset = (mem->start << PAGE_SHIFT) +
                                  pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0);
# else
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->start = pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0);
# endif
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
#endif

#if RTLNX_VER_MIN(5,13,0)
static void vbox_ttm_io_mem_free(struct ttm_device *bdev,
                                 struct ttm_resource *mem)
{
}
#elif RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_resource *mem)
{
}
#else
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                 struct ttm_mem_reg *mem)
{
}
#endif

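/*
 * Tear down a TTM page table.  From 5.10 (or RHEL 8.5) this is a driver
 * callback; older kernels instead reach it through a per-tt backend
 * function table.
 */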
#if RTLNX_VER_MIN(5,13,0)
static void vbox_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}
#elif RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static void vbox_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}
#else
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
        .destroy = &vbox_ttm_backend_destroy,
};
#endif

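/*
 * Allocate and initialise a TTM page table for a buffer object.  The
 * ttm_tt_init() signature has changed repeatedly: very old kernels take the
 * device, size and a dummy read page, and 5.11 added a caching argument.
 */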
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                                         unsigned long size,
                                         u32 page_flags,
                                         struct page *dummy_read_page)
#else
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
                                         u32 page_flags)
#endif
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;

#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
        tt->func = &vbox_tt_backend_func;
#endif
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
#elif RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_RANGE(8,5, 8,99)
        if (ttm_tt_init(tt, bo, page_flags)) {
#else
        if (ttm_tt_init(tt, bo, page_flags, ttm_write_combined)) {
#endif
                kfree(tt);
                return NULL;
        }

        return tt;
}

#if RTLNX_VER_MAX(4,17,0)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}
# else
static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
                                struct ttm_operation_ctx *ctx)
{
        return ttm_pool_populate(ttm, ctx);
}
# endif

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}
#endif

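/*
 * From 5.11 TTM no longer provides a default move implementation, so
 * supply a simple memcpy-based one.
 */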
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static int vbox_bo_move(struct ttm_buffer_object *bo, bool evict,
                        struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem,
                        struct ttm_place *hop)
{
        return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
#endif

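/*
 * TTM driver callback table.  Most entries are version-gated because TTM
 * has steadily absorbed driver responsibilities; the table type itself was
 * renamed from ttm_bo_driver to ttm_device_funcs in 5.13.
 */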
#if RTLNX_VER_MIN(5,13,0)
static struct ttm_device_funcs vbox_bo_driver = {
#else /* < 5.13.0 */
static struct ttm_bo_driver vbox_bo_driver = {
#endif /* < 5.13.0 */
        .ttm_tt_create = vbox_ttm_tt_create,
#if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        .ttm_tt_destroy = vbox_ttm_tt_destroy,
#endif
#if RTLNX_VER_MAX(4,17,0)
        .ttm_tt_populate = vbox_ttm_tt_populate,
        .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
#endif
#if RTLNX_VER_MAX(5,10,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        .init_mem_type = vbox_bo_init_mem_type,
#endif
#if RTLNX_VER_MIN(4,10,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)
        .eviction_valuable = ttm_bo_eviction_valuable,
#endif
        .evict_flags = vbox_bo_evict_flags,
#if RTLNX_VER_MAX(5,14,0)
        .verify_access = vbox_bo_verify_access,
#endif
        .io_mem_reserve = &vbox_ttm_io_mem_reserve,
        .io_mem_free = &vbox_ttm_io_mem_free,
#if RTLNX_VER_MIN(4,12,0) || RTLNX_RHEL_MAJ_PREREQ(7,5)
# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        .io_mem_pfn = ttm_bo_default_io_mem_pfn,
# endif
#endif
#if (RTLNX_VER_RANGE(4,7,0, 4,11,0) || RTLNX_RHEL_MAJ_PREREQ(7,4)) && !RTLNX_RHEL_MAJ_PREREQ(7,5)
        .lru_tail = &ttm_bo_default_lru_tail,
        .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
#endif
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        .move = &vbox_bo_move,
#endif
};

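/*
 * Bring up the memory manager: initialise the TTM device, register a range
 * manager covering the available VRAM, and mark the framebuffer BAR as
 * write-combined (via MTRR on kernels that still expose DRM_MTRR_WC).
 */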
int vbox_mm_init(struct vbox_private *vbox)
{
        int ret;
        struct drm_device *dev = vbox->dev;
#if RTLNX_VER_MIN(5,13,0)
        struct ttm_device *bdev = &vbox->ttm.bdev;
#else
        struct ttm_bo_device *bdev = &vbox->ttm.bdev;
#endif

#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
        ret = vbox_ttm_global_init(vbox);
        if (ret)
                return ret;
#endif
#if RTLNX_VER_MIN(5,13,0)
        ret = ttm_device_init(&vbox->ttm.bdev,
#else
        ret = ttm_bo_device_init(&vbox->ttm.bdev,
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
                                 vbox->ttm.bo_global_ref.ref.object,
#endif
                                 &vbox_bo_driver,
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                                 dev->dev,
#endif
#if RTLNX_VER_MIN(3,15,0) || RTLNX_RHEL_MAJ_PREREQ(7,1)
                                 dev->anon_inode->i_mapping,
#endif
#if RTLNX_VER_MIN(5,5,0) || RTLNX_RHEL_MIN(8,3) || RTLNX_SUSE_MAJ_PREREQ(15,3)
                                 dev->vma_offset_manager,
#elif RTLNX_VER_MAX(5,2,0) && !RTLNX_RHEL_MAJ_PREREQ(8,2)
                                 DRM_FILE_PAGE_OFFSET,
#endif
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                                 false,
#endif
                                 true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver: %d\n", ret);
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
                goto err_ttm_global_release;
#else
                return ret;
#endif
        }

#if RTLNX_VER_MIN(5,10,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ret = ttm_range_man_init(bdev, TTM_PL_VRAM, false,
                                 vbox->available_vram_size >> PAGE_SHIFT);
#else
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             vbox->available_vram_size >> PAGE_SHIFT);
#endif
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                goto err_device_release;
        }

#ifdef DRM_MTRR_WC
        vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                     pci_resource_len(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                     DRM_MTRR_WC);
#else
        vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(VBOX_DRM_TO_PCI_DEV(dev), 0),
                                         pci_resource_len(VBOX_DRM_TO_PCI_DEV(dev), 0));
#endif
        return 0;

err_device_release:
#if RTLNX_VER_MIN(5,13,0)
        ttm_device_fini(&vbox->ttm.bdev);
#else
        ttm_bo_device_release(&vbox->ttm.bdev);
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
err_ttm_global_release:
        vbox_ttm_global_release(vbox);
#endif
        return ret;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
        drm_mtrr_del(vbox->fb_mtrr,
                     pci_resource_start(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0),
                     pci_resource_len(VBOX_DRM_TO_PCI_DEV(vbox->dev), 0), DRM_MTRR_WC);
#else
        arch_phys_wc_del(vbox->fb_mtrr);
#endif
#if RTLNX_VER_MIN(5,13,0)
        ttm_device_fini(&vbox->ttm.bdev);
#else
        ttm_bo_device_release(&vbox->ttm.bdev);
#endif
#if RTLNX_VER_MAX(5,0,0) && !RTLNX_RHEL_MAJ_PREREQ(7,7) && !RTLNX_RHEL_MAJ_PREREQ(8,1)
        vbox_ttm_global_release(vbox);
#endif
}

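/*
 * Populate the BO's placement list from the requested memory-type mask,
 * falling back to system memory if the mask is empty.  On 5.11+ the old
 * TTM_PL_FLAG_* caching flags are gone, so only the mem_type is set.
 */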
void vbox_ttm_placement(struct vbox_bo *bo, u32 mem_type)
{
        u32 c = 0;
#if RTLNX_VER_MAX(3,18,0) && !RTLNX_RHEL_MAJ_PREREQ(7,2)
        bo->placement.fpfn = 0;
        bo->placement.lpfn = 0;
#else
        unsigned int i;
#endif

        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;

        if (mem_type & VBOX_MEM_TYPE_VRAM) {
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_VRAM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_VRAM;
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
#endif
        }
        if (mem_type & VBOX_MEM_TYPE_SYSTEM) {
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
        }
        if (!c) {
#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) = 0;
#elif RTLNX_VER_MIN(5,10,0)
                bo->placements[c].mem_type = TTM_PL_SYSTEM;
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING;
#else
                PLACEMENT_FLAGS(bo->placements[c++]) =
                        TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
#endif
        }

        bo->placement.num_placement = c;
        bo->placement.num_busy_placement = c;

#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
        for (i = 0; i < c; ++i) {
                bo->placements[i].fpfn = 0;
                bo->placements[i].lpfn = 0;
        }
#endif
}

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
static const struct drm_gem_object_funcs vbox_drm_gem_object_funcs = {
        .free = vbox_gem_free_object,
        .print_info = drm_gem_ttm_print_info,
# if RTLNX_VER_MIN(5,14,0)
        .mmap = drm_gem_ttm_mmap,
# endif
};
#endif

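/*
 * Create a TTM-backed GEM object.  On pre-5.13 kernels the accounting size
 * must be computed by the driver; on 5.14+ the parts of GEM object setup
 * that ttm_bo_init() used to perform are open-coded here instead.
 */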
int vbox_bo_create(struct drm_device *dev, int size, int align,
                   u32 flags, struct vbox_bo **pvboxbo)
{
        struct vbox_private *vbox = dev->dev_private;
        struct vbox_bo *vboxbo;
#if RTLNX_VER_MAX(5,13,0)
        size_t acc_size;
#endif
        int ret;

        vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
        if (!vboxbo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &vboxbo->gem, size);
        if (ret)
                goto err_free_vboxbo;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        if (!vboxbo->gem.funcs)
                vboxbo->gem.funcs = &vbox_drm_gem_object_funcs;
#endif
        vboxbo->bo.bdev = &vbox->ttm.bdev;
#if RTLNX_VER_MAX(3,15,0) && !RTLNX_RHEL_MAJ_PREREQ(7,1)
        vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

        vbox_ttm_placement(vboxbo, VBOX_MEM_TYPE_VRAM | VBOX_MEM_TYPE_SYSTEM);

#if RTLNX_VER_MAX(5,13,0)
        acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                       sizeof(struct vbox_bo));
#endif

#if RTLNX_VER_MIN(5,14,0)
        /* Initialization of the following was removed from the DRM stack
         * in 5.14, so we need to do it manually. */
        vboxbo->bo.base.funcs = &vbox_drm_gem_object_funcs;
        kref_init(&vboxbo->bo.base.refcount);
        vboxbo->bo.base.size = size;
        vboxbo->bo.base.dev = dev;
        dma_resv_init(&vboxbo->bo.base._resv);
        drm_vma_node_reset(&vboxbo->bo.base.vma_node);
#endif

        ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
                          ttm_bo_type_device, &vboxbo->placement,
#if RTLNX_VER_MAX(4,17,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
                          align >> PAGE_SHIFT, false, NULL, acc_size,
#elif RTLNX_VER_MAX(5,13,0) /* < 5.13.0 */
                          align >> PAGE_SHIFT, false, acc_size,
#else /* >= 5.13.0 */
                          align >> PAGE_SHIFT, false,
#endif /* >= 5.13.0 */
#if RTLNX_VER_MIN(3,18,0) || RTLNX_RHEL_MAJ_PREREQ(7,2)
                          NULL, NULL, vbox_bo_ttm_destroy);
#else
                          NULL, vbox_bo_ttm_destroy);
#endif
        if (ret) {
                /* In case of failure, ttm_bo_init() is supposed to call
                 * vbox_bo_ttm_destroy(), which in turn frees @vboxbo. */
                goto err_exit;
        }

        *pvboxbo = vboxbo;

        return 0;

err_free_vboxbo:
        kfree(vboxbo);
err_exit:
        return ret;
}

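/*
 * Return the BO's offset within VRAM.  Where TTM keeps this has moved over
 * time: bo.offset (pre-5.9), bo.mem.start (5.9+), bo.resource->start (5.14+).
 */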
static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(5,14,0)
        return bo->bo.resource->start << PAGE_SHIFT;
#elif RTLNX_VER_MIN(5,9,0) || RTLNX_RHEL_MIN(8,4) || RTLNX_SUSE_MAJ_PREREQ(15,3)
        return bo->bo.mem.start << PAGE_SHIFT;
#else
        return bo->bo.offset;
#endif
}

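/*
 * Reference-counted pinning.  On kernels before 5.11 a buffer is pinned by
 * setting TTM_PL_FLAG_NO_EVICT on its placements and revalidating; 5.11
 * replaced that with explicit ttm_bo_pin()/ttm_bo_unpin() calls.
 */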
int vbox_bo_pin(struct vbox_bo *bo, u32 mem_type, u64 *gpu_addr)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
        struct ttm_operation_ctx ctx = { false, false };
#endif
        int ret;
#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        int i;
#endif

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = vbox_bo_gpu_offset(bo);

                return 0;
        }

        vbox_ttm_placement(bo, mem_type);

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
#endif

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#else
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
        if (ret)
                return ret;

        bo->pin_count = 1;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ttm_bo_pin(&bo->bo);
#endif

        if (gpu_addr)
                *gpu_addr = vbox_bo_gpu_offset(bo);

        return 0;
}

int vbox_bo_unpin(struct vbox_bo *bo)
{
#if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
# if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        struct ttm_operation_ctx ctx = { false, false };
# endif
#endif
        int ret = 0;
#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        int i;
#endif

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;
#endif

#if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
#elif RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
#endif
        if (ret)
                return ret;

#if RTLNX_VER_MIN(5,11,0) || RTLNX_RHEL_RANGE(8,5, 8,99)
        ttm_bo_unpin(&bo->bo);
#endif

        return 0;
}

#if RTLNX_VER_MAX(5,11,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned. The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
# if RTLNX_VER_MIN(4,16,0) || RTLNX_RHEL_MAJ_PREREQ(7,6) || RTLNX_SUSE_MAJ_PREREQ(15,1) || RTLNX_SUSE_MAJ_PREREQ(12,5)
        struct ttm_operation_ctx ctx = { false, false };
# endif
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        vbox_ttm_placement(bo, VBOX_MEM_TYPE_SYSTEM);

        for (i = 0; i < bo->placement.num_placement; i++)
                PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

# if RTLNX_VER_MAX(4,16,0) && !RTLNX_RHEL_MAJ_PREREQ(7,6) && !RTLNX_SUSE_MAJ_PREREQ(15,1) && !RTLNX_SUSE_MAJ_PREREQ(12,5)
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
# else
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
# endif
        if (ret) {
                DRM_ERROR("pushing to system memory failed\n");
                return ret;
        }

        return 0;
}
#endif

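/*
 * mmap() entry point.  Up to 5.13 this forwards to ttm_bo_mmap(); on 5.14+
 * that helper is gone, so we route through drm_gem_mmap() and the
 * drm_gem_ttm_mmap() object callback instead.
 */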
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vbox_private *vbox;
        int ret = -EINVAL;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        vbox = file_priv->minor->dev->dev_private;

#if RTLNX_VER_MIN(5,14,0)
        if (drm_dev_is_unplugged(file_priv->minor->dev))
                return -ENODEV;
        ret = drm_gem_mmap(filp, vma);
#else
        ret = ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
#endif
        return ret;
}