VirtualBox

source: vbox/trunk/src/VBox/VMM/GMM.cpp@ 29613

Last change on this file since 29613 was 29613, checked in by vboxsync, 14 years ago

Pass back return code in the request packet to preserve informational codes. (VMMR3CallR0 chokes on them)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 12.6 KB
Line 
1/* $Id: GMM.cpp 29613 2010-05-18 11:40:07Z vboxsync $ */
2/** @file
3 * GMM - Global Memory Manager, ring-3 request wrappers.
4 */
5
6/*
7 * Copyright (C) 2008 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_GMM
23#include <VBox/gmm.h>
24#include <VBox/vmm.h>
25#include <VBox/vm.h>
26#include <VBox/sup.h>
27#include <VBox/err.h>
28#include <VBox/param.h>
29
30#include <iprt/assert.h>
31#include <VBox/log.h>
32#include <iprt/mem.h>
33#include <iprt/string.h>
34
35
36/**
37 * @see GMMR0InitialReservation
38 */
39GMMR3DECL(int) GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
40 GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
41{
42 GMMINITIALRESERVATIONREQ Req;
43 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
44 Req.Hdr.cbReq = sizeof(Req);
45 Req.cBasePages = cBasePages;
46 Req.cShadowPages = cShadowPages;
47 Req.cFixedPages = cFixedPages;
48 Req.enmPolicy = enmPolicy;
49 Req.enmPriority = enmPriority;
50 return VMMR3CallR0(pVM, VMMR0_DO_GMM_INITIAL_RESERVATION, 0, &Req.Hdr);
51}
52
53
54/**
55 * @see GMMR0UpdateReservation
56 */
57GMMR3DECL(int) GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
58{
59 GMMUPDATERESERVATIONREQ Req;
60 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
61 Req.Hdr.cbReq = sizeof(Req);
62 Req.cBasePages = cBasePages;
63 Req.cShadowPages = cShadowPages;
64 Req.cFixedPages = cFixedPages;
65 return VMMR3CallR0(pVM, VMMR0_DO_GMM_UPDATE_RESERVATION, 0, &Req.Hdr);
66}
67
68
69/**
70 * Prepares a GMMR0AllocatePages request.
71 *
72 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
73 * @param pVM Pointer to the shared VM structure.
74 * @param[out] ppReq Where to store the pointer to the request packet.
75 * @param cPages The number of pages that's to be allocated.
76 * @param enmAccount The account to charge.
77 */
78GMMR3DECL(int) GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
79{
80 uint32_t cb = RT_OFFSETOF(GMMALLOCATEPAGESREQ, aPages[cPages]);
81 PGMMALLOCATEPAGESREQ pReq = (PGMMALLOCATEPAGESREQ)RTMemTmpAllocZ(cb);
82 if (!pReq)
83 return VERR_NO_TMP_MEMORY;
84
85 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
86 pReq->Hdr.cbReq = cb;
87 pReq->enmAccount = enmAccount;
88 pReq->cPages = cPages;
89 NOREF(pVM);
90 *ppReq = pReq;
91 return VINF_SUCCESS;
92}
93
94
/**
 * Performs a GMMR0AllocatePages request.
 * This will call VMSetError on failure.
 *
 * Retries the allocation whenever ring-0 answers VERR_GMM_SEED_ME,
 * donating one freshly allocated ring-3 chunk to GMM per retry.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pReq    Pointer to the request (returned by GMMR3AllocatePagesPrepare).
 */
GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
{
    for (unsigned i = 0; ; i++)
    {
        int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
        if (RT_SUCCESS(rc))
        {
#ifdef LOG_ENABLED
            /* Log the page IDs and host physical addresses ring-0 filled in. */
            for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
                Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
                      pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
#endif
            return rc;
        }
        /* Any failure other than "seed me" is final; report it via VMSetError. */
        if (rc != VERR_GMM_SEED_ME)
            return VMSetError(pVM, rc, RT_SRC_POS,
                              N_("GMMR0AllocatePages failed to allocate %u pages"),
                              pReq->cPages);
        Assert(i < pReq->cPages); /* sanity: shouldn't need more seed rounds than pages requested */

        /*
         * Seed another chunk.
         */
        void *pvChunk;
        rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS,
                              N_("Out of memory (SUPR3PageAlloc) seeding a %u pages allocation request"),
                              pReq->cPages);

        /* Hand the chunk over to GMM; its address travels in the u64Arg parameter. */
        rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, N_("GMM seeding failed"));
    }
}
138
139
/**
 * Cleans up a GMMR0AllocatePages request.
 *
 * Frees the temporary request packet; safe to call with NULL since
 * RTMemTmpFree tolerates it.
 *
 * @param   pReq    Pointer to the request (returned by GMMR3AllocatePagesPrepare).
 */
GMMR3DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq)
{
    RTMemTmpFree(pReq);
}
148
149
150/**
151 * Prepares a GMMR0FreePages request.
152 *
153 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
154 * @param pVM Pointer to the shared VM structure.
155 * @param[out] ppReq Where to store the pointer to the request packet.
156 * @param cPages The number of pages that's to be freed.
157 * @param enmAccount The account to charge.
158 */
159GMMR3DECL(int) GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
160{
161 uint32_t cb = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cPages]);
162 PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
163 if (!pReq)
164 return VERR_NO_TMP_MEMORY;
165
166 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
167 pReq->Hdr.cbReq = cb;
168 pReq->enmAccount = enmAccount;
169 pReq->cPages = cPages;
170 NOREF(pVM);
171 *ppReq = pReq;
172 return VINF_SUCCESS;
173}
174
175
176/**
177 * Re-prepares a GMMR0FreePages request.
178 *
179 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
180 * @param pVM Pointer to the shared VM structure.
181 * @param pReq A request buffer previously returned by
182 * GMMR3FreePagesPrepare().
183 * @param cPages The number of pages originally passed to
184 * GMMR3FreePagesPrepare().
185 * @param enmAccount The account to charge.
186 */
187GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount)
188{
189 Assert(pReq->Hdr.u32Magic == SUPVMMR0REQHDR_MAGIC);
190 pReq->Hdr.cbReq = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cPages]);
191 pReq->enmAccount = enmAccount;
192 pReq->cPages = cPages;
193 NOREF(pVM);
194}
195
196
/**
 * Performs a GMMR0FreePages request.
 * This will call VMSetError on failure.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the shared VM structure.
 * @param   pReq            Pointer to the request (returned by GMMR3FreePagesPrepare).
 * @param   cActualPages    The number of pages actually freed; must not exceed
 *                          the cPages the packet was prepared with.
 */
GMMR3DECL(int) GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages)
{
    /*
     * Adjust the request if we ended up with fewer pages than anticipated.
     */
    if (cActualPages != pReq->cPages)
    {
        /* Growing the packet is not possible; only a shrink is valid. */
        AssertReturn(cActualPages < pReq->cPages, VERR_INTERNAL_ERROR);
        if (!cActualPages)
            return VINF_SUCCESS; /* nothing to free - skip the ring-0 round trip */
        pReq->cPages = cActualPages;
        pReq->Hdr.cbReq = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cActualPages]);
    }

    /*
     * Do the job.
     */
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
    if (RT_SUCCESS(rc))
        return rc;
    AssertRC(rc);
    return VMSetError(pVM, rc, RT_SRC_POS,
                      N_("GMMR0FreePages failed to free %u pages"),
                      pReq->cPages);
}
231
232
/**
 * Cleans up a GMMR0FreePages request.
 *
 * Frees the temporary request packet; safe to call with NULL since
 * RTMemTmpFree tolerates it.
 *
 * @param   pReq    Pointer to the request (returned by GMMR3FreePagesPrepare).
 */
GMMR3DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq)
{
    RTMemTmpFree(pReq);
}
241
242
243/**
244 * Frees allocated pages, for bailing out on failure.
245 *
246 * This will not call VMSetError on failure but will use AssertLogRel instead.
247 *
248 * @param pVM Pointer to the shared VM structure.
249 * @param pAllocReq The allocation request to undo.
250 */
251GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq)
252{
253 uint32_t cb = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[pAllocReq->cPages]);
254 PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
255 AssertLogRelReturnVoid(pReq);
256
257 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
258 pReq->Hdr.cbReq = cb;
259 pReq->enmAccount = pAllocReq->enmAccount;
260 pReq->cPages = pAllocReq->cPages;
261 uint32_t iPage = pAllocReq->cPages;
262 while (iPage-- > 0)
263 {
264 Assert(pAllocReq->aPages[iPage].idPage != NIL_GMM_PAGEID);
265 pReq->aPages[iPage].idPage = pAllocReq->aPages[iPage].idPage;
266 }
267
268 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
269 AssertLogRelRC(rc);
270
271 RTMemTmpFree(pReq);
272}
273
274
275/**
276 * @see GMMR0BalloonedPages
277 */
278GMMR3DECL(int) GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
279{
280 GMMBALLOONEDPAGESREQ Req;
281 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
282 Req.Hdr.cbReq = sizeof(Req);
283 Req.enmAction = enmAction;
284 Req.cBalloonedPages = cBalloonedPages;
285
286 return VMMR3CallR0(pVM, VMMR0_DO_GMM_BALLOONED_PAGES, 0, &Req.Hdr);
287}
288
/**
 * Queries global (hypervisor-wide) memory statistics from GMM.
 *
 * All output parameters are zeroed up front, so on failure the caller
 * sees zeroes rather than garbage.
 *
 * @returns VBox status code.
 * @param   pVM                     Pointer to the shared VM structure.
 * @param[out] pcTotalAllocPages    Total pages allocated by all VMs.
 * @param[out] pcTotalFreePages     Total pages free in GMM.
 * @param[out] pcTotalBalloonPages  Total pages ballooned by all VMs.
 * @see     GMMR0QueryHypervisorMemoryStatsReq
 */
GMMR3DECL(int) GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages, uint64_t *pcTotalBalloonPages)
{
    GMMMEMSTATSREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    Req.cAllocPages  = 0;
    Req.cFreePages   = 0;
    Req.cBalloonedPages = 0;

    *pcTotalAllocPages   = 0;
    *pcTotalFreePages    = 0;
    *pcTotalBalloonPages = 0;

    /* Must be callable from any thread, so can't use VMMR3CallR0. */
    int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0, VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS, 0, &Req.Hdr);
    /* NOTE(review): strict VINF_SUCCESS check - informational statuses leave
       the outputs zeroed; presumably intentional, confirm against ring-0 side. */
    if (rc == VINF_SUCCESS)
    {
        *pcTotalAllocPages   = Req.cAllocPages;
        *pcTotalFreePages    = Req.cFreePages;
        *pcTotalBalloonPages = Req.cBalloonedPages;
    }
    return rc;
}
315
316/**
317 * @see GMMR0QueryMemoryStatsReq
318 */
319GMMR3DECL(int) GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages)
320{
321 GMMMEMSTATSREQ Req;
322 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
323 Req.Hdr.cbReq = sizeof(Req);
324 Req.cAllocPages = 0;
325 Req.cFreePages = 0;
326 Req.cBalloonedPages = 0;
327
328 *pcAllocPages = 0;
329 *pcMaxPages = 0;
330 *pcBalloonPages = 0;
331
332 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_QUERY_MEM_STATS, 0, &Req.Hdr);
333 if (rc == VINF_SUCCESS)
334 {
335 *pcAllocPages = Req.cAllocPages;
336 *pcMaxPages = Req.cMaxPages;
337 *pcBalloonPages = Req.cBalloonedPages;
338 }
339 return rc;
340}
341
342/**
343 * @see GMMR0MapUnmapChunk
344 */
345GMMR3DECL(int) GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
346{
347 GMMMAPUNMAPCHUNKREQ Req;
348 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
349 Req.Hdr.cbReq = sizeof(Req);
350 Req.idChunkMap = idChunkMap;
351 Req.idChunkUnmap = idChunkUnmap;
352 Req.pvR3 = NULL;
353 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
354 if (RT_SUCCESS(rc) && ppvR3)
355 *ppvR3 = Req.pvR3;
356 return rc;
357}
358
359/**
360 * @see GMMR0FreeLargePage
361 */
362GMMR3DECL(int) GMMR3FreeLargePage(PVM pVM, uint32_t idPage)
363{
364 GMMFREELARGEPAGEREQ Req;
365 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
366 Req.Hdr.cbReq = sizeof(Req);
367 Req.idPage = idPage;
368 return VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_LARGE_PAGE, 0, &Req.Hdr);
369}
370
/**
 * Donates a ring-3 memory chunk to GMM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pvR3    Ring-3 address of the chunk to donate.
 * @see     GMMR0SeedChunk
 */
GMMR3DECL(int) GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3)
{
    /* The chunk address travels in the u64Arg parameter; no request packet. */
    return VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvR3, NULL);
}
378
379
/**
 * Registers a shared module with GMM.
 *
 * The caller fills in the module details and region table; this wrapper
 * completes the request header and dispatches it.
 *
 * @returns VBox status code (the ring-0 status from the packet on success).
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pReq    The registration request; cRegions must be set.
 * @see     GMMR0RegisterSharedModule
 */
GMMR3DECL(int) GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq)
{
    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    /* Variable sized request: header plus cRegions region descriptors. */
    pReq->Hdr.cbReq = RT_OFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]);
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_REGISTER_SHARED_MODULE, 0, &pReq->Hdr);
    /* The real status is returned in the packet so informational codes
       survive the VMMR3CallR0 path (which chokes on them). */
    if (rc == VINF_SUCCESS)
        rc = pReq->rc;
    return rc;
}
392
/**
 * Unregisters a shared module from GMM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pReq    The unregistration request; module fields filled by caller.
 * @see     GMMR0UnregisterSharedModule
 */
GMMR3DECL(int) GMMR3UnregisterSharedModule(PVM pVM, PGMMUNREGISTERSHAREDMODULEREQ pReq)
{
    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq = sizeof(*pReq);
    return VMMR3CallR0(pVM, VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE, 0, &pReq->Hdr);
}
402
/**
 * Resets all shared module registrations for this VM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 * @see     GMMR0ResetSharedModules
 */
GMMR3DECL(int) GMMR3ResetSharedModules(PVM pVM)
{
    /* No parameters, hence no request packet. */
    return VMMR3CallR0(pVM, VMMR0_DO_GMM_RESET_SHARED_MODULES, 0, NULL);
}
410
/**
 * Triggers a shared-module page check for this VM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 * @see     GMMR0CheckSharedModules
 */
GMMR3DECL(int) GMMR3CheckSharedModules(PVM pVM)
{
    /* No parameters, hence no request packet. */
    return VMMR3CallR0(pVM, VMMR0_DO_GMM_CHECK_SHARED_MODULES, 0, NULL);
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette