VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/GMM.cpp@ 99051

Last change on this file since 99051 was 98103, checked in by vboxsync, 23 months ago

Copyright year updates by scm.

/* $Id: GMM.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
/** @file
 * GMM - Global Memory Manager, ring-3 request wrappers.
 */

/*
 * Copyright (C) 2008-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_GMM
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/sup.h>
#include <VBox/err.h>
#include <VBox/param.h>

#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/mem.h>
#include <iprt/string.h>


/**
 * @see GMMR0InitialReservation
 */
GMMR3DECL(int) GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                       GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
{
    if (!SUPR3IsDriverless())
    {
        GMMINITIALRESERVATIONREQ Req;
        Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
        Req.Hdr.cbReq = sizeof(Req);
        Req.cBasePages = cBasePages;
        Req.cShadowPages = cShadowPages;
        Req.cFixedPages = cFixedPages;
        Req.enmPolicy = enmPolicy;
        Req.enmPriority = enmPriority;
        return VMMR3CallR0(pVM, VMMR0_DO_GMM_INITIAL_RESERVATION, 0, &Req.Hdr);
    }
    return VINF_SUCCESS;
}


/**
 * @see GMMR0UpdateReservation
 */
GMMR3DECL(int) GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
{
    if (!SUPR3IsDriverless())
    {
        GMMUPDATERESERVATIONREQ Req;
        Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
        Req.Hdr.cbReq = sizeof(Req);
        Req.cBasePages = cBasePages;
        Req.cShadowPages = cShadowPages;
        Req.cFixedPages = cFixedPages;
        return VMMR3CallR0(pVM, VMMR0_DO_GMM_UPDATE_RESERVATION, 0, &Req.Hdr);
    }
    return VINF_SUCCESS;
}


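/*
 * Illustrative sketch only, not part of the original file: how a caller might
 * establish and later adjust the memory reservation using the wrappers above.
 * The page counts and the GMMOCPOLICY_NO_OC / GMMPRIORITY_NORMAL values are
 * assumptions for the example; in practice PGM computes these from the VM
 * configuration.
 */
#if 0 /* example, not built */
static int exampleReserveVMMemory(PVM pVM)
{
    /* Initial reservation, made once while constructing the VM. */
    int rc = GMMR3InitialReservation(pVM, 262144 /* cBasePages = 1 GiB */, 1024 /* cShadowPages */,
                                     32 /* cFixedPages */, GMMOCPOLICY_NO_OC, GMMPRIORITY_NORMAL);
    if (RT_SUCCESS(rc))
        /* Later adjustment, e.g. after the shadow page requirements change. */
        rc = GMMR3UpdateReservation(pVM, 262144, 2048, 32);
    return rc;
}
#endif
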
/**
 * Prepares a GMMR0AllocatePages request.
 *
 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
 * @param       pVM         The cross context VM structure.
 * @param[out]  ppReq       Where to store the pointer to the request packet.
 * @param       cPages      The number of pages that are to be allocated.
 * @param       enmAccount  The account to charge.
 */
GMMR3DECL(int) GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
{
    uint32_t cb = RT_UOFFSETOF_DYN(GMMALLOCATEPAGESREQ, aPages[cPages]);
    PGMMALLOCATEPAGESREQ pReq = (PGMMALLOCATEPAGESREQ)RTMemTmpAllocZ(cb);
    if (!pReq)
        return VERR_NO_TMP_MEMORY;

    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq = cb;
    pReq->enmAccount = enmAccount;
    pReq->cPages = cPages;
    NOREF(pVM);
    *ppReq = pReq;
    return VINF_SUCCESS;
}


/**
 * Performs a GMMR0AllocatePages request.
 *
 * This will call VMSetError on failure.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pReq    Pointer to the request (returned by GMMR3AllocatePagesPrepare).
 */
GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
{
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
    if (RT_SUCCESS(rc))
    {
#ifdef LOG_ENABLED
        for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
            Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp fZeroed=%d\n",
                  pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys, pReq->aPages[iPage].fZeroed));
#endif
        return rc;
    }
    return VMSetError(pVM, rc, RT_SRC_POS, N_("GMMR0AllocatePages failed to allocate %u pages"), pReq->cPages);
}


/**
 * Cleans up a GMMR0AllocatePages request.
 * @param   pReq    Pointer to the request (returned by GMMR3AllocatePagesPrepare).
 */
GMMR3DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq)
{
    RTMemTmpFree(pReq);
}

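
/*
 * Illustrative sketch only, not part of the original file: the typical
 * prepare / perform / cleanup sequence for page allocation.  GMMACCOUNT_BASE
 * and the descriptor initialisation are assumptions for the example; in
 * practice PGM fills in each aPages[] entry before performing the request.
 */
#if 0 /* example, not built */
static int exampleAllocatePages(PVM pVM, uint32_t cPages)
{
    PGMMALLOCATEPAGESREQ pReq;
    int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
    if (RT_SUCCESS(rc))
    {
        /* Initialize pReq->aPages[0..cPages-1] as GMMR0AllocatePages expects
           (see VBox/vmm/gmm.h), then submit the request. */
        rc = GMMR3AllocatePagesPerform(pVM, pReq);
        /* On success the page ids and host physical addresses are returned in
           pReq->aPages[]; consume them before freeing the request buffer. */
        GMMR3AllocatePagesCleanup(pReq);
    }
    return rc;
}
#endif
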

/**
 * Prepares a GMMR0FreePages request.
 *
 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
 * @param       pVM         The cross context VM structure.
 * @param[out]  ppReq       Where to store the pointer to the request packet.
 * @param       cPages      The number of pages that are to be freed.
 * @param       enmAccount  The account to charge.
 */
GMMR3DECL(int) GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
{
    uint32_t cb = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[cPages]);
    PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
    if (!pReq)
        return VERR_NO_TMP_MEMORY;

    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq = cb;
    pReq->enmAccount = enmAccount;
    pReq->cPages = cPages;
    NOREF(pVM);
    *ppReq = pReq;
    return VINF_SUCCESS;
}


/**
 * Re-prepares a GMMR0FreePages request.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pReq        A request buffer previously returned by
 *                      GMMR3FreePagesPrepare().
 * @param   cPages      The number of pages originally passed to
 *                      GMMR3FreePagesPrepare().
 * @param   enmAccount  The account to charge.
 */
GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount)
{
    Assert(pReq->Hdr.u32Magic == SUPVMMR0REQHDR_MAGIC);
    pReq->Hdr.cbReq = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[cPages]);
    pReq->enmAccount = enmAccount;
    pReq->cPages = cPages;
    NOREF(pVM);
}


/**
 * Performs a GMMR0FreePages request.
 * This will call VMSetError on failure.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pReq            Pointer to the request (returned by GMMR3FreePagesPrepare).
 * @param   cActualPages    The number of pages actually freed.
 */
GMMR3DECL(int) GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages)
{
    /*
     * Adjust the request if we ended up with fewer pages than anticipated.
     */
    if (cActualPages != pReq->cPages)
    {
        AssertReturn(cActualPages < pReq->cPages, VERR_GMM_ACTUAL_PAGES_IPE);
        if (!cActualPages)
            return VINF_SUCCESS;
        pReq->cPages = cActualPages;
        pReq->Hdr.cbReq = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[cActualPages]);
    }

    /*
     * Do the job.
     */
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
    if (RT_SUCCESS(rc))
        return rc;
    AssertRC(rc);
    return VMSetError(pVM, rc, RT_SRC_POS,
                      N_("GMMR0FreePages failed to free %u pages"),
                      pReq->cPages);
}


/**
 * Cleans up a GMMR0FreePages request.
 * @param   pReq    Pointer to the request (returned by GMMR3FreePagesPrepare).
 */
GMMR3DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq)
{
    RTMemTmpFree(pReq);
}

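
/*
 * Illustrative sketch only, not part of the original file: freeing previously
 * allocated pages with the prepare / perform / cleanup wrappers above.  The
 * page ids are assumed to come from an earlier allocation; GMMACCOUNT_BASE is
 * an assumption for the example.
 */
#if 0 /* example, not built */
static int exampleFreePages(PVM pVM, uint32_t const *paidPages, uint32_t cPages)
{
    PGMMFREEPAGESREQ pReq;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < cPages; i++)
            pReq->aPages[i].idPage = paidPages[i];
        /* cActualPages may be smaller than cPages if fewer pages ended up
           needing to be freed; here they are all freed. */
        rc = GMMR3FreePagesPerform(pVM, pReq, cPages);
        GMMR3FreePagesCleanup(pReq);
    }
    return rc;
}
#endif
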

/**
 * Frees allocated pages, for bailing out on failure.
 *
 * This will not call VMSetError on failure but will use AssertLogRel instead.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pAllocReq   The allocation request to undo.
 */
GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq)
{
    uint32_t cb = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[pAllocReq->cPages]);
    PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
    AssertLogRelReturnVoid(pReq);

    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq = cb;
    pReq->enmAccount = pAllocReq->enmAccount;
    pReq->cPages = pAllocReq->cPages;
    uint32_t iPage = pAllocReq->cPages;
    while (iPage-- > 0)
    {
        Assert(pAllocReq->aPages[iPage].idPage != NIL_GMM_PAGEID);
        pReq->aPages[iPage].idPage = pAllocReq->aPages[iPage].idPage;
    }

    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
    AssertLogRelRC(rc);

    RTMemTmpFree(pReq);
}


/**
 * @see GMMR0BalloonedPages
 */
GMMR3DECL(int) GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
{
    int rc;
    if (!SUPR3IsDriverless())
    {
        GMMBALLOONEDPAGESREQ Req;
        Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
        Req.Hdr.cbReq = sizeof(Req);
        Req.enmAction = enmAction;
        Req.cBalloonedPages = cBalloonedPages;

        rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_BALLOONED_PAGES, 0, &Req.Hdr);
    }
    /*
     * Ignore reset and fail all other requests.
     */
    else if (enmAction == GMMBALLOONACTION_RESET && cBalloonedPages == 0)
        rc = VINF_SUCCESS;
    else
        rc = VERR_SUP_DRIVERLESS;
    return rc;
}

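
/*
 * Illustrative sketch only, not part of the original file: inflating the
 * balloon account by a number of pages.  GMMBALLOONACTION_INFLATE is an
 * assumption for the example; the guest additions / PGM drive the real
 * ballooning requests.
 */
#if 0 /* example, not built */
static int exampleInflateBalloon(PVM pVM, uint32_t cPages)
{
    return GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, cPages);
}
#endif
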

/**
 * @note Caller does the driverless check.
 * @see GMMR0QueryVMMMemoryStatsReq
 */
GMMR3DECL(int) GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages, uint64_t *pcTotalBalloonPages, uint64_t *puTotalBalloonSize)
{
    GMMMEMSTATSREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.cAllocPages = 0;
    Req.cFreePages = 0;
    Req.cBalloonedPages = 0;
    Req.cSharedPages = 0;

    *pcTotalAllocPages = 0;
    *pcTotalFreePages = 0;
    *pcTotalBalloonPages = 0;
    *puTotalBalloonSize = 0;

    /* Must be callable from any thread, so can't use VMMR3CallR0. */
    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS, 0, &Req.Hdr);
    if (rc == VINF_SUCCESS)
    {
        *pcTotalAllocPages = Req.cAllocPages;
        *pcTotalFreePages = Req.cFreePages;
        *pcTotalBalloonPages = Req.cBalloonedPages;
        *puTotalBalloonSize = Req.cSharedPages;
    }
    return rc;
}


/**
 * @see GMMR0QueryMemoryStatsReq
 */
GMMR3DECL(int) GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages)
{
    GMMMEMSTATSREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.cAllocPages = 0;
    Req.cFreePages = 0;
    Req.cBalloonedPages = 0;

    *pcAllocPages = 0;
    *pcMaxPages = 0;
    *pcBalloonPages = 0;

    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_QUERY_MEM_STATS, 0, &Req.Hdr);
    if (rc == VINF_SUCCESS)
    {
        *pcAllocPages = Req.cAllocPages;
        *pcMaxPages = Req.cMaxPages;
        *pcBalloonPages = Req.cBalloonedPages;
    }
    return rc;
}

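
/*
 * Illustrative sketch only, not part of the original file: querying the
 * per-VM memory statistics and logging them.  Uses only the wrapper above;
 * the log format is an assumption for the example.
 */
#if 0 /* example, not built */
static void exampleLogMemoryStats(PVM pVM)
{
    uint64_t cAllocPages = 0, cMaxPages = 0, cBalloonPages = 0;
    int rc = GMMR3QueryMemoryStats(pVM, &cAllocPages, &cMaxPages, &cBalloonPages);
    if (RT_SUCCESS(rc))
        LogRel(("GMM stats: %RU64 allocated, %RU64 max, %RU64 ballooned pages\n",
                cAllocPages, cMaxPages, cBalloonPages));
}
#endif
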

/**
 * @see GMMR0MapUnmapChunk
 */
GMMR3DECL(int) GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
{
    GMMMAPUNMAPCHUNKREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.idChunkMap = idChunkMap;
    Req.idChunkUnmap = idChunkUnmap;
    Req.pvR3 = NULL;
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
    if (RT_SUCCESS(rc) && ppvR3)
        *ppvR3 = Req.pvR3;
    return rc;
}


/**
 * @see GMMR0FreeLargePage
 */
GMMR3DECL(int) GMMR3FreeLargePage(PVM pVM, uint32_t idPage)
{
    GMMFREELARGEPAGEREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.idPage = idPage;
    return VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_LARGE_PAGE, 0, &Req.Hdr);
}


/**
 * @see GMMR0RegisterSharedModule
 */
GMMR3DECL(int) GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq)
{
    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq = RT_UOFFSETOF_DYN(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]);
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_REGISTER_SHARED_MODULE, 0, &pReq->Hdr);
    if (rc == VINF_SUCCESS)
        rc = pReq->rc;
    return rc;
}


/**
 * @see GMMR0UnregisterSharedModule
 */
GMMR3DECL(int) GMMR3UnregisterSharedModule(PVM pVM, PGMMUNREGISTERSHAREDMODULEREQ pReq)
{
    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq = sizeof(*pReq);
    return VMMR3CallR0(pVM, VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE, 0, &pReq->Hdr);
}


/**
 * @see GMMR0ResetSharedModules
 */
GMMR3DECL(int) GMMR3ResetSharedModules(PVM pVM)
{
    if (!SUPR3IsDriverless())
        return VMMR3CallR0(pVM, VMMR0_DO_GMM_RESET_SHARED_MODULES, 0, NULL);
    return VINF_SUCCESS;
}


/**
 * @see GMMR0CheckSharedModules
 */
GMMR3DECL(int) GMMR3CheckSharedModules(PVM pVM)
{
    return VMMR3CallR0(pVM, VMMR0_DO_GMM_CHECK_SHARED_MODULES, 0, NULL);
}


#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
/**
 * @see GMMR0FindDuplicatePage
 */
GMMR3DECL(bool) GMMR3IsDuplicatePage(PVM pVM, uint32_t idPage)
{
    GMMFINDDUPLICATEPAGEREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.idPage = idPage;
    Req.fDuplicate = false;

    /* Must be callable from any thread, so can't use VMMR3CallR0. */
    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_FIND_DUPLICATE_PAGE, 0, &Req.Hdr);
    if (rc == VINF_SUCCESS)
        return Req.fDuplicate;
    return false;
}
#endif /* VBOX_STRICT && HC_ARCH_BITS == 64 */
