VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/GMM.cpp@ 94319

Last change on this file since 94319 was 93115, checked in by vboxsync, 3 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 13.3 KB
Line 
1/* $Id: GMM.cpp 93115 2022-01-01 11:31:46Z vboxsync $ */
2/** @file
3 * GMM - Global Memory Manager, ring-3 request wrappers.
4 */
5
6/*
7 * Copyright (C) 2008-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_GMM
23#include <VBox/vmm/gmm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/vmcc.h>
26#include <VBox/sup.h>
27#include <VBox/err.h>
28#include <VBox/param.h>
29
30#include <iprt/assert.h>
31#include <VBox/log.h>
32#include <iprt/mem.h>
33#include <iprt/string.h>
34
35
36/**
37 * @see GMMR0InitialReservation
38 */
39GMMR3DECL(int) GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
40 GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
41{
42 if (!SUPR3IsDriverless())
43 {
44 GMMINITIALRESERVATIONREQ Req;
45 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
46 Req.Hdr.cbReq = sizeof(Req);
47 Req.cBasePages = cBasePages;
48 Req.cShadowPages = cShadowPages;
49 Req.cFixedPages = cFixedPages;
50 Req.enmPolicy = enmPolicy;
51 Req.enmPriority = enmPriority;
52 return VMMR3CallR0(pVM, VMMR0_DO_GMM_INITIAL_RESERVATION, 0, &Req.Hdr);
53 }
54 return VINF_SUCCESS;
55}
56
57
58/**
59 * @see GMMR0UpdateReservation
60 */
61GMMR3DECL(int) GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
62{
63 if (!SUPR3IsDriverless())
64 {
65 GMMUPDATERESERVATIONREQ Req;
66 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
67 Req.Hdr.cbReq = sizeof(Req);
68 Req.cBasePages = cBasePages;
69 Req.cShadowPages = cShadowPages;
70 Req.cFixedPages = cFixedPages;
71 return VMMR3CallR0(pVM, VMMR0_DO_GMM_UPDATE_RESERVATION, 0, &Req.Hdr);
72 }
73 return VINF_SUCCESS;
74}
75
76
77/**
78 * Prepares a GMMR0AllocatePages request.
79 *
80 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
81 * @param pVM The cross context VM structure.
82 * @param[out] ppReq Where to store the pointer to the request packet.
83 * @param cPages The number of pages that's to be allocated.
84 * @param enmAccount The account to charge.
85 */
86GMMR3DECL(int) GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
87{
88 uint32_t cb = RT_UOFFSETOF_DYN(GMMALLOCATEPAGESREQ, aPages[cPages]);
89 PGMMALLOCATEPAGESREQ pReq = (PGMMALLOCATEPAGESREQ)RTMemTmpAllocZ(cb);
90 if (!pReq)
91 return VERR_NO_TMP_MEMORY;
92
93 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
94 pReq->Hdr.cbReq = cb;
95 pReq->enmAccount = enmAccount;
96 pReq->cPages = cPages;
97 NOREF(pVM);
98 *ppReq = pReq;
99 return VINF_SUCCESS;
100}
101
102
103/**
104 * Performs a GMMR0AllocatePages request.
105 *
106 * This will call VMSetError on failure.
107 *
108 * @returns VBox status code.
109 * @param pVM The cross context VM structure.
110 * @param pReq Pointer to the request (returned by GMMR3AllocatePagesPrepare).
111 */
112GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
113{
114 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
115 if (RT_SUCCESS(rc))
116 {
117#ifdef LOG_ENABLED
118 for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
119 Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp fZeroed=%d\n",
120 pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys, pReq->aPages[iPage].fZeroed));
121#endif
122 return rc;
123 }
124 return VMSetError(pVM, rc, RT_SRC_POS, N_("GMMR0AllocatePages failed to allocate %u pages"), pReq->cPages);
125}
126
127
128/**
129 * Cleans up a GMMR0AllocatePages request.
130 * @param pReq Pointer to the request (returned by GMMR3AllocatePagesPrepare).
131 */
132GMMR3DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq)
133{
134 RTMemTmpFree(pReq);
135}
136
137
138/**
139 * Prepares a GMMR0FreePages request.
140 *
141 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
142 * @param pVM The cross context VM structure.
143 * @param[out] ppReq Where to store the pointer to the request packet.
144 * @param cPages The number of pages that's to be freed.
145 * @param enmAccount The account to charge.
146 */
147GMMR3DECL(int) GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
148{
149 uint32_t cb = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[cPages]);
150 PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
151 if (!pReq)
152 return VERR_NO_TMP_MEMORY;
153
154 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
155 pReq->Hdr.cbReq = cb;
156 pReq->enmAccount = enmAccount;
157 pReq->cPages = cPages;
158 NOREF(pVM);
159 *ppReq = pReq;
160 return VINF_SUCCESS;
161}
162
163
164/**
165 * Re-prepares a GMMR0FreePages request.
166 *
167 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
168 * @param pVM The cross context VM structure.
169 * @param pReq A request buffer previously returned by
170 * GMMR3FreePagesPrepare().
171 * @param cPages The number of pages originally passed to
172 * GMMR3FreePagesPrepare().
173 * @param enmAccount The account to charge.
174 */
175GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount)
176{
177 Assert(pReq->Hdr.u32Magic == SUPVMMR0REQHDR_MAGIC);
178 pReq->Hdr.cbReq = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[cPages]);
179 pReq->enmAccount = enmAccount;
180 pReq->cPages = cPages;
181 NOREF(pVM);
182}
183
184
185/**
186 * Performs a GMMR0FreePages request.
187 * This will call VMSetError on failure.
188 *
189 * @returns VBox status code.
190 * @param pVM The cross context VM structure.
191 * @param pReq Pointer to the request (returned by GMMR3FreePagesPrepare).
192 * @param cActualPages The number of pages actually freed.
193 */
194GMMR3DECL(int) GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages)
195{
196 /*
197 * Adjust the request if we ended up with fewer pages than anticipated.
198 */
199 if (cActualPages != pReq->cPages)
200 {
201 AssertReturn(cActualPages < pReq->cPages, VERR_GMM_ACTUAL_PAGES_IPE);
202 if (!cActualPages)
203 return VINF_SUCCESS;
204 pReq->cPages = cActualPages;
205 pReq->Hdr.cbReq = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[cActualPages]);
206 }
207
208 /*
209 * Do the job.
210 */
211 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
212 if (RT_SUCCESS(rc))
213 return rc;
214 AssertRC(rc);
215 return VMSetError(pVM, rc, RT_SRC_POS,
216 N_("GMMR0FreePages failed to free %u pages"),
217 pReq->cPages);
218}
219
220
221/**
222 * Cleans up a GMMR0FreePages request.
223 * @param pReq Pointer to the request (returned by GMMR3FreePagesPrepare).
224 */
225GMMR3DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq)
226{
227 RTMemTmpFree(pReq);
228}
229
230
231/**
232 * Frees allocated pages, for bailing out on failure.
233 *
234 * This will not call VMSetError on failure but will use AssertLogRel instead.
235 *
236 * @param pVM The cross context VM structure.
237 * @param pAllocReq The allocation request to undo.
238 */
239GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq)
240{
241 uint32_t cb = RT_UOFFSETOF_DYN(GMMFREEPAGESREQ, aPages[pAllocReq->cPages]);
242 PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
243 AssertLogRelReturnVoid(pReq);
244
245 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
246 pReq->Hdr.cbReq = cb;
247 pReq->enmAccount = pAllocReq->enmAccount;
248 pReq->cPages = pAllocReq->cPages;
249 uint32_t iPage = pAllocReq->cPages;
250 while (iPage-- > 0)
251 {
252 Assert(pAllocReq->aPages[iPage].idPage != NIL_GMM_PAGEID);
253 pReq->aPages[iPage].idPage = pAllocReq->aPages[iPage].idPage;
254 }
255
256 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
257 AssertLogRelRC(rc);
258
259 RTMemTmpFree(pReq);
260}
261
262
263/**
264 * @see GMMR0BalloonedPages
265 */
266GMMR3DECL(int) GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
267{
268 int rc;
269 if (!SUPR3IsDriverless())
270 {
271 GMMBALLOONEDPAGESREQ Req;
272 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
273 Req.Hdr.cbReq = sizeof(Req);
274 Req.enmAction = enmAction;
275 Req.cBalloonedPages = cBalloonedPages;
276
277 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_BALLOONED_PAGES, 0, &Req.Hdr);
278 }
279 /*
280 * Ignore reset and fail all other requests.
281 */
282 else if (enmAction == GMMBALLOONACTION_RESET && cBalloonedPages == 0)
283 rc = VINF_SUCCESS;
284 else
285 rc = VERR_SUP_DRIVERLESS;
286 return rc;
287}
288
289
/**
 * Queries the ring-0 GMM for hypervisor-wide (all VMs) memory statistics.
 *
 * All output parameters are zeroed up front, so callers get defined values
 * even when the ring-0 call fails.
 *
 * @returns VBox status code.
 * @param   pVM                     The cross context VM structure.
 * @param   pcTotalAllocPages       Where to return the total allocated page count.
 * @param   pcTotalFreePages        Where to return the total free page count.
 * @param   pcTotalBalloonPages     Where to return the total ballooned page count.
 * @param   puTotalBalloonSize      Where to return the total balloon size
 *                                  (filled from Req.cSharedPages, see note in body).
 *
 * @note Caller does the driverless check.
 * @see GMMR0QueryVMMMemoryStatsReq
 */
GMMR3DECL(int) GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages, uint64_t *pcTotalBalloonPages, uint64_t *puTotalBalloonSize)
{
    /* Zero the request's stat fields; ring-0 fills them on success. */
    GMMMEMSTATSREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq = sizeof(Req);
    Req.cAllocPages = 0;
    Req.cFreePages = 0;
    Req.cBalloonedPages = 0;
    Req.cSharedPages = 0;

    /* Defined outputs even on failure. */
    *pcTotalAllocPages = 0;
    *pcTotalFreePages = 0;
    *pcTotalBalloonPages = 0;
    *puTotalBalloonSize = 0;

    /* Must be callable from any thread, so can't use VMMR3CallR0. */
    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS, 0, &Req.Hdr);
    if (rc == VINF_SUCCESS)
    {
        *pcTotalAllocPages = Req.cAllocPages;
        *pcTotalFreePages = Req.cFreePages;
        *pcTotalBalloonPages = Req.cBalloonedPages;
        /* NOTE(review): the shared-pages counter is returned as the "balloon
           size" here -- looks like a field mix-up; confirm against the ring-0
           VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS handler before relying on it. */
        *puTotalBalloonSize = Req.cSharedPages;
    }
    return rc;
}
320
321
322/**
323 * @see GMMR0QueryMemoryStatsReq
324 */
325GMMR3DECL(int) GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages)
326{
327 GMMMEMSTATSREQ Req;
328 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
329 Req.Hdr.cbReq = sizeof(Req);
330 Req.cAllocPages = 0;
331 Req.cFreePages = 0;
332 Req.cBalloonedPages = 0;
333
334 *pcAllocPages = 0;
335 *pcMaxPages = 0;
336 *pcBalloonPages = 0;
337
338 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_QUERY_MEM_STATS, 0, &Req.Hdr);
339 if (rc == VINF_SUCCESS)
340 {
341 *pcAllocPages = Req.cAllocPages;
342 *pcMaxPages = Req.cMaxPages;
343 *pcBalloonPages = Req.cBalloonedPages;
344 }
345 return rc;
346}
347
348
349/**
350 * @see GMMR0MapUnmapChunk
351 */
352GMMR3DECL(int) GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
353{
354 GMMMAPUNMAPCHUNKREQ Req;
355 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
356 Req.Hdr.cbReq = sizeof(Req);
357 Req.idChunkMap = idChunkMap;
358 Req.idChunkUnmap = idChunkUnmap;
359 Req.pvR3 = NULL;
360 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
361 if (RT_SUCCESS(rc) && ppvR3)
362 *ppvR3 = Req.pvR3;
363 return rc;
364}
365
366
367/**
368 * @see GMMR0FreeLargePage
369 */
370GMMR3DECL(int) GMMR3FreeLargePage(PVM pVM, uint32_t idPage)
371{
372 GMMFREELARGEPAGEREQ Req;
373 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
374 Req.Hdr.cbReq = sizeof(Req);
375 Req.idPage = idPage;
376 return VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_LARGE_PAGE, 0, &Req.Hdr);
377}
378
379
380/**
381 * @see GMMR0RegisterSharedModule
382 */
383GMMR3DECL(int) GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq)
384{
385 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
386 pReq->Hdr.cbReq = RT_UOFFSETOF_DYN(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]);
387 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_REGISTER_SHARED_MODULE, 0, &pReq->Hdr);
388 if (rc == VINF_SUCCESS)
389 rc = pReq->rc;
390 return rc;
391}
392
393
/**
 * Unregisters a shared module with the ring-0 GMM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pReq    The unregistration request; the header is (re)initialized
 *                  here.
 * @see GMMR0UnregisterSharedModule
 */
GMMR3DECL(int) GMMR3UnregisterSharedModule(PVM pVM, PGMMUNREGISTERSHAREDMODULEREQ pReq)
{
    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq = sizeof(*pReq);
    return VMMR3CallR0(pVM, VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE, 0, &pReq->Hdr);
}
403
404
405/**
406 * @see GMMR0ResetSharedModules
407 */
408GMMR3DECL(int) GMMR3ResetSharedModules(PVM pVM)
409{
410 if (!SUPR3IsDriverless())
411 return VMMR3CallR0(pVM, VMMR0_DO_GMM_RESET_SHARED_MODULES, 0, NULL);
412 return VINF_SUCCESS;
413}
414
415
/**
 * Asks the ring-0 GMM to check the registered shared modules.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @see GMMR0CheckSharedModules
 */
GMMR3DECL(int) GMMR3CheckSharedModules(PVM pVM)
{
    /* NOTE(review): unlike GMMR3ResetSharedModules there is no
       SUPR3IsDriverless() short-circuit here -- confirm whether driverless
       configurations can ever reach this call. */
    return VMMR3CallR0(pVM, VMMR0_DO_GMM_CHECK_SHARED_MODULES, 0, NULL);
}
423
424
#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
/**
 * Checks with ring-0 whether the given page has a duplicate (strict builds only).
 *
 * @returns true if a duplicate was found, false otherwise (including on
 *          ring-0 call failure).
 * @param   pVM     The cross context VM structure.
 * @param   idPage  The id of the page to check.
 * @see GMMR0FindDuplicatePage
 */
GMMR3DECL(bool) GMMR3IsDuplicatePage(PVM pVM, uint32_t idPage)
{
    GMMFINDDUPLICATEPAGEREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    Req.idPage       = idPage;
    Req.fDuplicate   = false;

    /* Must be callable from any thread, so can't use VMMR3CallR0. */
    int rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_FIND_DUPLICATE_PAGE, 0, &Req.Hdr);
    return rc == VINF_SUCCESS && Req.fDuplicate;
}
#endif /* VBOX_STRICT && HC_ARCH_BITS == 64 */
444
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette