VirtualBox

source: vbox/trunk/include/VBox/pgm.h@ 5285

Last change on this file since 5285 was 5040, checked in by vboxsync, 17 years ago

GC phys/virt to HC virt functions are no longer accessible in our PDM interface.
Rewrote disassembly functions to use the mapping functions.

Code that runs in EMT (like CSAM/PATM) can still use the old conversion functions. Easier to use
and (far) less overhead.

1/** @file
2 * PGM - Page Monitor/Manager.
3 */
4
5/*
6 * Copyright (C) 2006-2007 innotek GmbH
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License as published by the Free Software Foundation,
12 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
13 * distribution. VirtualBox OSE is distributed in the hope that it will
14 * be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16
17#ifndef ___VBox_pgm_h
18#define ___VBox_pgm_h
19
20#include <VBox/cdefs.h>
21#include <VBox/types.h>
22#include <VBox/sup.h>
23#include <VBox/cpum.h>
24#include <VBox/vmapi.h>
25
26__BEGIN_DECLS
27
28/** @defgroup grp_pgm The Page Monitor/Manager API
29 * @{
30 */
31
32/** Enable dynamic allocation of guest physical RAM. */
33#define PGM_DYNAMIC_RAM_ALLOC
34
35/** Chunk size for dynamically allocated physical memory. */
36#define PGM_DYNAMIC_CHUNK_SIZE (1*1024*1024)
37/** Shift GC physical address by 20 bits to get the offset into the pvHCChunkHC array. */
38#define PGM_DYNAMIC_CHUNK_SHIFT 20
39/** Dynamic chunk offset mask. */
40#define PGM_DYNAMIC_CHUNK_OFFSET_MASK 0xfffff
41/** Dynamic chunk base mask. */
42#define PGM_DYNAMIC_CHUNK_BASE_MASK (~(RTGCPHYS)PGM_DYNAMIC_CHUNK_OFFSET_MASK)
43
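/*
 * Illustrative sketch (not part of the original header): how the dynamic
 * chunk macros above are typically combined to split a guest physical
 * address into a chunk base, a chunk index and an offset within the chunk.
 * The helper name is hypothetical.
 */
#if 0 /* example only */
DECLINLINE(void) pgmExampleSplitGCPhys(RTGCPHYS GCPhys, RTGCPHYS *pGCPhysChunkBase, uint32_t *piChunk, uint32_t *poffChunk)
{
    /* The chunk base has the low 20 bits masked off. */
    *pGCPhysChunkBase = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
    /* The chunk index is the address shifted down by the chunk shift (1MB chunks). */
    *piChunk          = (uint32_t)(GCPhys >> PGM_DYNAMIC_CHUNK_SHIFT);
    /* The offset into the chunk is the low 20 bits. */
    *poffChunk        = (uint32_t)(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK);
}
#endif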
44
45/** Page flags used for PGMHyperSetPageFlags
46 * @deprecated
47 * @{ */
48#define PGMPAGE_READ 1
49#define PGMPAGE_WRITE 2
50#define PGMPAGE_USER 4
51#define PGMPAGE_SYSTEM 8
52#define PGMPAGE_NOTPRESENT 16
53/** @} */
54
55
56/**
57 * FNPGMRELOCATE callback mode.
58 */
59typedef enum PGMRELOCATECALL
60{
61 /** The callback is for checking if the suggested address is suitable. */
62 PGMRELOCATECALL_SUGGEST = 1,
63 /** The callback is for executing the relocation. */
64 PGMRELOCATECALL_RELOCATE
65} PGMRELOCATECALL;
66
67
68/**
69 * Callback function which will be called when PGM is trying to find
70 * a new location for the mapping.
71 *
72 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
73 * In 1) the callback should say if it objects to a suggested new location. If it
74 * accepts the new location, it is called again to perform the relocation.
75 *
76 *
77 * @returns true if the location is ok.
78 * @returns false if another location should be found.
79 * @param GCPtrOld The old virtual address.
80 * @param GCPtrNew The new virtual address.
81 * @param enmMode Used to indicate the callback mode.
82 * @param pvUser User argument.
83 * @remark The return value is not a failure indicator, it's an acceptance
84 * indicator. Relocation cannot fail!
85 */
86typedef DECLCALLBACK(bool) FNPGMRELOCATE(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
87/** Pointer to a relocation callback function. */
88typedef FNPGMRELOCATE *PFNPGMRELOCATE;
89
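/*
 * Illustrative sketch (not part of the original header): a minimal
 * FNPGMRELOCATE implementation. In SUGGEST mode it merely accepts or
 * rejects the proposed address; in RELOCATE mode it updates a cached
 * pointer of its own. The function name and user data are hypothetical.
 */
#if 0 /* example only */
static DECLCALLBACK(bool) pgmExampleRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
{
    RTGCPTR *pGCPtrCached = (RTGCPTR *)pvUser; /* hypothetical: a cached mapping address owned by the caller */
    switch (enmMode)
    {
        case PGMRELOCATECALL_SUGGEST:
            /* Return false here to have PGM look for another location. */
            return true;

        case PGMRELOCATECALL_RELOCATE:
            /* Carry out the relocation; the return value only signals acceptance, relocation cannot fail. */
            *pGCPtrCached = GCPtrNew;
            return true;

        default:
            return false;
    }
}
#endif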
90
91/**
92 * Physical page access handler type.
93 */
94typedef enum PGMPHYSHANDLERTYPE
95{
96 /** MMIO range. Pages are not present, all access is done in interpreter or recompiler. */
97 PGMPHYSHANDLERTYPE_MMIO = 1,
98 /** Handle all normal page faults for a physical page range. */
99 PGMPHYSHANDLERTYPE_PHYSICAL,
100 /** Handle all write access to a physical page range. */
101 PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
102 /** Handle all access to a physical page range. */
103 PGMPHYSHANDLERTYPE_PHYSICAL_ALL
104
105} PGMPHYSHANDLERTYPE;
106
107/**
108 * \#PF Handler callback for physical access handler ranges (MMIO among others) in GC.
109 *
110 * @returns VBox status code (appropriate for GC return).
111 * @param pVM VM Handle.
112 * @param uErrorCode CPU Error code.
113 * @param pRegFrame Trap register frame.
114 * NULL on DMA and other non CPU access.
115 * @param pvFault The fault address (cr2).
116 * @param GCPhysFault The GC physical address corresponding to pvFault.
117 * @param pvUser User argument.
118 */
119typedef DECLCALLBACK(int) FNPGMGCPHYSHANDLER(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
120/** Pointer to PGM access callback. */
121typedef FNPGMGCPHYSHANDLER *PFNPGMGCPHYSHANDLER;
122
123/**
124 * \#PF Handler callback for physical access handler ranges (MMIO among others) in R0.
125 *
126 * @returns VBox status code (appropriate for R0 return).
127 * @param pVM VM Handle.
128 * @param uErrorCode CPU Error code.
129 * @param pRegFrame Trap register frame.
130 * NULL on DMA and other non CPU access.
131 * @param pvFault The fault address (cr2).
132 * @param GCPhysFault The GC physical address corresponding to pvFault.
133 * @param pvUser User argument.
134 */
135typedef DECLCALLBACK(int) FNPGMR0PHYSHANDLER(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
136/** Pointer to PGM access callback. */
137typedef FNPGMR0PHYSHANDLER *PFNPGMR0PHYSHANDLER;
138
139/**
140 * Guest Access type
141 */
142typedef enum PGMACCESSTYPE
143{
144 /** Read access. */
145 PGMACCESSTYPE_READ = 1,
146 /** Write access. */
147 PGMACCESSTYPE_WRITE
148} PGMACCESSTYPE;
149
150/**
151 * \#PF Handler callback for physical access handler ranges (MMIO among others) in HC.
152 *
153 * The handler cannot raise any faults; it's mainly for monitoring write access
154 * to certain pages.
155 *
156 * @returns VINF_SUCCESS if the handler has carried out the operation.
157 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
158 * @param pVM VM Handle.
159 * @param GCPhys The physical address the guest is writing to.
160 * @param pvPhys The HC mapping of that address.
161 * @param pvBuf What the guest is reading/writing.
162 * @param cbBuf How much it's reading/writing.
163 * @param enmAccessType The access type.
164 * @param pvUser User argument.
165 */
166typedef DECLCALLBACK(int) FNPGMR3PHYSHANDLER(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
167/** Pointer to PGM access callback. */
168typedef FNPGMR3PHYSHANDLER *PFNPGMR3PHYSHANDLER;
169
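/*
 * Illustrative sketch (not part of the original header): a minimal ring-3
 * FNPGMR3PHYSHANDLER that shadows guest writes into a private buffer and
 * then lets the caller carry out the actual access. The device state
 * structure, its field names and the memcpy from <iprt/string.h> are
 * assumptions made for the example.
 */
#if 0 /* example only */
typedef struct PGMEXAMPLEDEV
{
    RTGCPHYS GCPhysBase;        /* start of the monitored page */
    uint8_t  abShadow[4096];    /* hypothetical shadow copy of that page */
} PGMEXAMPLEDEV;

static DECLCALLBACK(int) pgmExampleWriteHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                                                PGMACCESSTYPE enmAccessType, void *pvUser)
{
    PGMEXAMPLEDEV *pThis = (PGMEXAMPLEDEV *)pvUser;
    if (enmAccessType == PGMACCESSTYPE_WRITE)
    {
        /* Keep a private copy of what the guest wrote. */
        RTGCPHYS off = GCPhys - pThis->GCPhysBase;
        if (off + cbBuf <= sizeof(pThis->abShadow))
            memcpy(&pThis->abShadow[off], pvBuf, cbBuf);
    }
    /* Ask the caller to perform the actual access against pvPhys. */
    return VINF_PGM_HANDLER_DO_DEFAULT;
}
#endif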
170
171/**
172 * Virtual access handler type.
173 */
174typedef enum PGMVIRTHANDLERTYPE
175{
176 /** Natural traps only. */
177 PGMVIRTHANDLERTYPE_NORMAL = 1,
178 /** Write access handled. */
179 PGMVIRTHANDLERTYPE_WRITE,
180 /** All access handled. */
181 PGMVIRTHANDLERTYPE_ALL,
182 /** By eip - Natural traps only. */
183 PGMVIRTHANDLERTYPE_EIP,
184 /** Hypervisor write access handled.
185 * This is used to catch the guest trying to write to LDT, TSS and any other
186 * system structure which the brain dead Intel guys let unprivileged code find. */
187 PGMVIRTHANDLERTYPE_HYPERVISOR
188
189} PGMVIRTHANDLERTYPE;
190
191/**
192 * \#PF Handler callback for virtual access handler ranges.
193 *
194 * Important to realize that a physical page in a range can have aliases, and
195 * for ALL and WRITE handlers these will also trigger.
196 *
197 * @returns VBox status code (appropriate for GC return).
198 * @param pVM VM Handle.
199 * @param uErrorCode CPU Error code.
200 * @param pRegFrame Trap register frame.
201 * @param pvFault The fault address (cr2).
202 * @param pvRange The base address of the handled virtual range.
203 * @param offRange The offset of the access into this range.
204 * (If it's an EIP range this is the EIP, if not it's pvFault.)
205 */
206typedef DECLCALLBACK(int) FNPGMGCVIRTHANDLER(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange);
207/** Pointer to PGM access callback. */
208typedef FNPGMGCVIRTHANDLER *PFNPGMGCVIRTHANDLER;
209
210/**
211 * \#PF Handler callback for virtual access handler ranges.
212 *
213 * Important to realize that a physical page in a range can have aliases, and
214 * for ALL and WRITE handlers these will also trigger.
215 *
216 * @returns VINF_SUCCESS if the handler has carried out the operation.
217 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
218 * @param pVM VM Handle.
219 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
220 * @param pvPtr The HC mapping of that address.
221 * @param pvBuf What the guest is reading/writing.
222 * @param cbBuf How much it's reading/writing.
223 * @param enmAccessType The access type.
224 * @param pvUser User argument.
225 */
226typedef DECLCALLBACK(int) FNPGMHCVIRTHANDLER(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
227/** Pointer to PGM access callback. */
228typedef FNPGMHCVIRTHANDLER *PFNPGMHCVIRTHANDLER;
229
230
231/**
232 * \#PF Handler callback for invalidation of virtual access handler ranges.
233 *
234 * @param pVM VM Handle.
235 * @param GCPtr The virtual address the guest has changed.
236 */
237typedef DECLCALLBACK(int) FNPGMHCVIRTINVALIDATE(PVM pVM, RTGCPTR GCPtr);
238/** Pointer to PGM invalidation callback. */
239typedef FNPGMHCVIRTINVALIDATE *PFNPGMHCVIRTINVALIDATE;
240
241/**
242 * Paging mode.
243 */
244typedef enum PGMMODE
245{
246 /** The usual invalid value. */
247 PGMMODE_INVALID = 0,
248 /** Real mode. */
249 PGMMODE_REAL,
250 /** Protected mode, no paging. */
251 PGMMODE_PROTECTED,
252 /** 32-bit paging. */
253 PGMMODE_32_BIT,
254 /** PAE paging. */
255 PGMMODE_PAE,
256 /** PAE paging with NX enabled. */
257 PGMMODE_PAE_NX,
258 /** 64-bit AMD paging (long mode). */
259 PGMMODE_AMD64,
260 /** 64-bit AMD paging (long mode) with NX enabled. */
261 PGMMODE_AMD64_NX,
262 /** The max number of modes */
263 PGMMODE_MAX,
264 /** 32bit hackishness. */
265 PGMMODE_32BIT_HACK = 0x7fffffff
266} PGMMODE;
267
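/*
 * Illustrative sketch (not part of the original header): how the PGMMODE
 * values above are typically tested. The helper name is hypothetical and
 * relies on the enum ordering (the paged modes follow PGMMODE_PROTECTED).
 */
#if 0 /* example only */
DECLINLINE(bool) pgmExampleIsPagingEnabled(PGMMODE enmMode)
{
    /* Real and protected mode run without paging; 32-bit, PAE and AMD64 modes use it. */
    return enmMode >= PGMMODE_32_BIT && enmMode < PGMMODE_MAX;
}
#endif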
268
269/**
270 * Gets the current CR3 register value for the shadow memory context.
271 * @returns CR3 value.
272 * @param pVM The VM handle.
273 */
274PGMDECL(uint32_t) PGMGetHyperCR3(PVM pVM);
275
276/**
277 * Gets the CR3 register value for the 32-Bit shadow memory context.
278 * @returns CR3 value.
279 * @param pVM The VM handle.
280 */
281PGMDECL(uint32_t) PGMGetHyper32BitCR3(PVM pVM);
282
283/**
284 * Gets the CR3 register value for the PAE shadow memory context.
285 * @returns CR3 value.
286 * @param pVM The VM handle.
287 */
288PGMDECL(uint32_t) PGMGetHyperPaeCR3(PVM pVM);
289
290/**
291 * Gets the CR3 register value for the AMD64 shadow memory context.
292 * @returns CR3 value.
293 * @param pVM The VM handle.
294 */
295PGMDECL(uint32_t) PGMGetHyperAmd64CR3(PVM pVM);
296
297/**
298 * Gets the current CR3 register value for the HC intermediate memory context.
299 * @returns CR3 value.
300 * @param pVM The VM handle.
301 */
302PGMDECL(uint32_t) PGMGetInterHCCR3(PVM pVM);
303
304/**
305 * Gets the current CR3 register value for the GC intermediate memory context.
306 * @returns CR3 value.
307 * @param pVM The VM handle.
308 */
309PGMDECL(uint32_t) PGMGetInterGCCR3(PVM pVM);
310
311/**
312 * Gets the CR3 register value for the 32-Bit intermediate memory context.
313 * @returns CR3 value.
314 * @param pVM The VM handle.
315 */
316PGMDECL(uint32_t) PGMGetInter32BitCR3(PVM pVM);
317
318/**
319 * Gets the CR3 register value for the PAE intermediate memory context.
320 * @returns CR3 value.
321 * @param pVM The VM handle.
322 */
323PGMDECL(uint32_t) PGMGetInterPaeCR3(PVM pVM);
324
325/**
326 * Gets the CR3 register value for the AMD64 intermediate memory context.
327 * @returns CR3 value.
328 * @param pVM The VM handle.
329 */
330PGMDECL(uint32_t) PGMGetInterAmd64CR3(PVM pVM);
331
332/**
333 * \#PF Handler.
334 *
335 * @returns VBox status code (appropriate for GC return).
336 * @param pVM VM Handle.
337 * @param uErr The trap error code.
338 * @param pRegFrame Trap register frame.
339 * @param pvFault The fault address.
340 */
341PGMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
342
343/**
344 * Prefetch a page/set of pages.
345 *
346 * Typically used to sync commonly used pages before entering raw mode
347 * after a CR3 reload.
348 *
349 * @returns VBox status code suitable for scheduling.
350 * @retval VINF_SUCCESS on success.
351 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
352 * @param pVM VM handle.
353 * @param GCPtrPage Page to prefetch.
354 */
355PGMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage);
356
357/**
358 * Verifies a range of pages for read or write access.
359 *
360 * Supports handling of pages marked for dirty bit tracking and CSAM.
361 *
362 * @returns VBox status code.
363 * @param pVM VM handle.
364 * @param Addr Guest virtual address to check.
365 * @param cbSize Access size.
366 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*)).
367 */
368PGMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess);
369
370/**
371 * Verifies a range of pages for read or write access.
372 *
373 * Only checks the guest's page tables.
374 *
375 * @returns VBox status code.
376 * @param pVM VM handle.
377 * @param Addr Guest virtual address to check.
378 * @param cbSize Access size.
379 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*)).
380 */
381PGMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess);
382
383/**
384 * Executes an instruction using the interpreter.
385 *
386 * @returns VBox status code (appropriate for trap handling and GC return).
387 * @param pVM VM handle.
388 * @param pRegFrame Register frame.
389 * @param pvFault Fault address.
390 */
391PGMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
392
393/**
394 * Maps a range of physical pages at a given virtual address
395 * in the guest context.
396 *
397 * The GC virtual address range must be within an existing mapping.
398 *
399 * @returns VBox status code.
400 * @param pVM The virtual machine.
401 * @param GCPtr Where to map the page(s). Must be page aligned.
402 * @param HCPhys Start of the range of physical pages. Must be page aligned.
403 * @param cbPages Number of bytes to map. Must be page aligned.
404 * @param fFlags Page flags (X86_PTE_*).
405 */
406PGMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags);
407
408/**
409 * Sets (replaces) the page flags for a range of pages in a mapping.
410 *
411 * The pages must be mapped pages; it's not possible to change the flags of
412 * Guest OS pages.
413 *
414 * @returns VBox status.
415 * @param pVM VM handle.
416 * @param GCPtr Virtual address of the first page in the range.
417 * @param cb Size (in bytes) of the range to apply the modification to.
418 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
419 */
420PGMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags);
421
422/**
423 * Modify page flags for a range of pages in a mapping.
424 *
425 * The existing flags are ANDed with the fMask and ORed with the fFlags.
426 *
427 * @returns VBox status code.
428 * @param pVM VM handle.
429 * @param GCPtr Virtual address of the first page in the range.
430 * @param cb Size (in bytes) of the range to apply the modification to.
431 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
432 * @param fMask The AND mask - page flags X86_PTE_*.
433 * Be very CAREFUL when ~'ing constants which could be 32-bit!
434 */
435PGMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
436
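/*
 * Illustrative sketch (not part of the original header): using the AND/OR
 * semantics of PGMMapModifyPage() to make one page of a mapping read-only.
 * Assumes the X86_PTE_RW bit from x86.h; the helper name is hypothetical.
 */
#if 0 /* example only */
static int pgmExampleMakePageReadOnly(PVM pVM, RTGCPTR GCPtr)
{
    /* OR in nothing, AND out the writable bit. Note the 64-bit cast before ~ as warned above. */
    return PGMMapModifyPage(pVM, GCPtr, 0x1000 /* one page */, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
}
#endif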
437/**
438 * Gets effective page information (from the VMM page directory).
439 *
440 * @returns VBox status.
441 * @param pVM VM Handle.
442 * @param GCPtr Guest Context virtual address of the page.
443 * @param pfFlags Where to store the flags. These are X86_PTE_*.
444 * @param pHCPhys Where to store the HC physical address of the page.
445 * This is page aligned.
446 * @remark You should use PGMMapGetPage() for pages in a mapping.
447 */
448PGMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
449
450/**
451 * Sets (replaces) the page flags for a range of pages in the shadow context.
452 *
453 * @returns VBox status.
454 * @param pVM VM handle.
455 * @param GCPtr The address of the first page.
456 * @param cb The size of the range in bytes.
457 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
458 * @remark You must use PGMMapSetPage() for pages in a mapping.
459 */
460PGMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags);
461
462/**
463 * Modify page flags for a range of pages in the shadow context.
464 *
465 * The existing flags are ANDed with the fMask and ORed with the fFlags.
466 *
467 * @returns VBox status code.
468 * @param pVM VM handle.
469 * @param GCPtr Virtual address of the first page in the range.
470 * @param cb Size (in bytes) of the range to apply the modification to.
471 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
472 * @param fMask The AND mask - page flags X86_PTE_*.
473 * Be very CAREFUL when ~'ing constants which could be 32-bit!
474 * @remark You must use PGMMapModifyPage() for pages in a mapping.
475 */
476PGMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
477
478/**
479 * Gets effective Guest OS page information.
480 *
481 * When GCPtr is in a big page, the function will return as if it were a normal
482 * 4KB page. If distinguishing between big and normal pages becomes necessary
483 * at a later point, a PGMGstGetPageEx() will be created for that
484 * purpose.
485 *
486 * @returns VBox status.
487 * @param pVM VM Handle.
488 * @param GCPtr Guest Context virtual address of the page.
489 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
490 * @param pGCPhys Where to store the GC physical address of the page.
491 * This is page aligned.
492 */
493PGMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
494
495/**
496 * Checks if the page is present.
497 *
498 * @returns true if the page is present.
499 * @returns false if the page is not present.
500 * @param pVM The VM handle.
501 * @param GCPtr Address within the page.
502 */
503PGMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr);
504
505/**
506 * Sets (replaces) the page flags for a range of pages in the guest's tables.
507 *
508 * @returns VBox status.
509 * @param pVM VM handle.
510 * @param GCPtr The address of the first page.
511 * @param cb The size of the range in bytes.
512 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
513 */
514PGMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags);
515
516/**
517 * Modify page flags for a range of pages in the guest's tables
518 *
519 * The existing flags are ANDed with the fMask and ORed with the fFlags.
520 *
521 * @returns VBox status code.
522 * @param pVM VM handle.
523 * @param GCPtr Virtual address of the first page in the range.
524 * @param cb Size (in bytes) of the range to apply the modification to.
525 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
526 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
527 * Be very CAREFUL when ~'ing constants which could be 32-bit!
528 */
529PGMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
530
531/**
532 * Performs and schedules necessary updates following a CR3 load or reload.
533 *
534 * This will normally involve mapping the guest PD or nPDPTR
535 *
536 * @returns VBox status code.
537 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
538 * safely be ignored and overridden since the FF will be set too then.
539 * @param pVM VM handle.
540 * @param cr3 The new cr3.
541 * @param fGlobal Indicates whether this is a global flush or not.
542 */
543PGMDECL(int) PGMFlushTLB(PVM pVM, uint32_t cr3, bool fGlobal);
544
545/**
546 * Synchronize the paging structures.
547 *
548 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
549 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
550 * in several places, most importantly whenever the CR3 is loaded.
551 *
552 * @returns VBox status code.
553 * @param pVM The virtual machine.
554 * @param cr0 Guest context CR0 register
555 * @param cr3 Guest context CR3 register
556 * @param cr4 Guest context CR4 register
557 * @param fGlobal Including global page directories or not
558 */
559PGMDECL(int) PGMSyncCR3(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal);
560
561/**
562 * Called whenever CR0 or CR4 changes in a way which may change
563 * the paging mode.
564 *
565 * @returns VBox status code fit for scheduling in GC and R0.
566 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
567 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
568 * @param pVM VM handle.
569 * @param cr0 The new cr0.
570 * @param cr4 The new cr4.
571 * @param efer The new extended feature enable register.
572 */
573PGMDECL(int) PGMChangeMode(PVM pVM, uint32_t cr0, uint32_t cr4, uint64_t efer);
574
575/**
576 * Gets the current guest paging mode.
577 *
578 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
579 *
580 * @returns The current paging mode.
581 * @param pVM The VM handle.
582 */
583PGMDECL(PGMMODE) PGMGetGuestMode(PVM pVM);
584
585/**
586 * Gets the current shadow paging mode.
587 *
588 * @returns The current paging mode.
589 * @param pVM The VM handle.
590 */
591PGMDECL(PGMMODE) PGMGetShadowMode(PVM pVM);
592
593/**
594 * Get mode name.
595 *
596 * @returns read-only name string.
597 * @param enmMode The mode whose name is desired.
598 */
599PGMDECL(const char *) PGMGetModeName(PGMMODE enmMode);
600
601/**
602 * Register an access handler for a physical range.
603 *
604 * @returns VBox status code.
605 * @param pVM VM Handle.
606 * @param enmType Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
607 * @param GCPhys Start physical address.
608 * @param GCPhysLast Last physical address. (inclusive)
609 * @param pfnHandlerR3 The R3 handler.
610 * @param pvUserR3 User argument to the R3 handler.
611 * @param pfnHandlerR0 The R0 handler.
612 * @param pvUserR0 User argument to the R0 handler.
613 * @param pfnHandlerGC The GC handler.
614 * @param pvUserGC User argument to the GC handler.
615 * This must be a GC pointer because it will be relocated!
616 * @param pszDesc Pointer to description string. This must not be freed.
617 */
618PGMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
619 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
620 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
621 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
622 R3PTRTYPE(const char *) pszDesc);
623
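/*
 * Illustrative sketch (not part of the original header): registering a
 * write handler for a single page with the API above, reusing the
 * hypothetical pgmExampleWriteHandler sketched earlier. Only the ring-3
 * handler is supplied; whether the R0/GC handlers may be omitted depends
 * on the handler type, so treat this purely as an illustration.
 */
#if 0 /* example only */
static int pgmExampleRegisterWriteHandler(PVM pVM, RTGCPHYS GCPhysPage, RTR3PTR pvUser)
{
    return PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
                                        GCPhysPage, GCPhysPage + 0xfff /* last byte, inclusive */,
                                        pgmExampleWriteHandler, pvUser, /* R3 handler and user argument */
                                        0, 0,                           /* no R0 handler in this sketch */
                                        0, 0,                           /* no GC handler in this sketch */
                                        "Example write monitor");
}
#endif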
624/**
625 * Modify a physical page access handler.
626 *
627 * Modification can only be done to the range itself, not the type or anything else.
628 *
629 * @returns VBox status code.
630 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
631 * and a new registration must be performed!
632 * @param pVM VM handle.
633 * @param GCPhysCurrent Current location.
634 * @param GCPhys New location.
635 * @param GCPhysLast New last location.
636 */
637PGMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast);
638
639/**
640 * Deregister a physical page access handler.
641 *
642 * @returns VBox status code.
643 * @param pVM VM Handle.
644 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
645 */
646PGMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys);
647
648/**
649 * Changes the callbacks associated with a physical access handler.
650 *
651 * @returns VBox status code.
652 * @param pVM VM Handle.
653 * @param GCPhys Start physical address.
654 * @param pfnHandlerR3 The R3 handler.
655 * @param pvUserR3 User argument to the R3 handler.
656 * @param pfnHandlerR0 The R0 handler.
657 * @param pvUserR0 User argument to the R0 handler.
658 * @param pfnHandlerGC The GC handler.
659 * @param pvUserGC User argument to the GC handler.
660 * This must be a GC pointer because it will be relocated!
661 * @param pszDesc Pointer to description string. This must not be freed.
662 */
663PGMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
664 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
665 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
666 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
667 R3PTRTYPE(const char *) pszDesc);
668
669/**
670 * Splits a physical access handler in two.
671 *
672 * @returns VBox status code.
673 * @param pVM VM Handle.
674 * @param GCPhys Start physical address of the handler.
675 * @param GCPhysSplit The split address.
676 */
677PGMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit);
678
679/**
680 * Joins up two adjacent physical access handlers which have the same callbacks.
681 *
682 * @returns VBox status code.
683 * @param pVM VM Handle.
684 * @param GCPhys1 Start physical address of the first handler.
685 * @param GCPhys2 Start physical address of the second handler.
686 */
687PGMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2);
688
689/**
690 * Temporarily turns off the access monitoring of a page within a monitored
691 * physical write/all page access handler region.
692 *
693 * Use this when no further #PFs are required for that page. Be aware that
694 * a page directory sync might reset the flags, and turn on access monitoring
695 * for the page.
696 *
697 * The caller must do required page table modifications.
698 *
699 * @returns VBox status code.
700 * @param pVM VM Handle
701 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
702 * @param GCPhysPage Physical address of the page to turn off access monitoring for.
703 */
704PGMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage);
705
706
707/**
708 * Resets any modifications to individual pages in a physical
709 * page access handler region.
710 *
711 * This is used together with PGMHandlerPhysicalModify().
712 *
713 * @returns VBox status code.
714 * @param pVM VM Handle
715 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
716 */
717PGMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys);
718
719/**
720 * Turns access monitoring of a page within a monitored
721 * physical write/all page access handler region back on.
722 *
723 * The caller must do required page table modifications.
724 *
725 * @returns VBox status code.
726 * @param pVM VM Handle
727 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
728 * @param GCPhysPage Physical address of the page to turn on access monitoring for.
729 */
730PGMDECL(int) PGMHandlerPhysicalPageReset(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage);
731
732/**
733 * Checks if a physical range is handled
734 *
735 * @returns boolean.
736 * @param pVM VM Handle
737 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
738 */
739PGMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys);
740
741/**
742 * Checks if Address Gate 20 is enabled or not.
743 *
744 * @returns true if enabled.
745 * @returns false if disabled.
746 * @param pVM VM handle.
747 */
748PGMDECL(bool) PGMPhysIsA20Enabled(PVM pVM);
749
750/**
751 * Validates a GC physical address.
752 *
753 * @returns true if valid.
754 * @returns false if invalid.
755 * @param pVM The VM handle.
756 * @param GCPhys The physical address to validate.
757 */
758PGMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys);
759
760/**
761 * Checks if a GC physical address is a normal page,
762 * i.e. not ROM, MMIO or reserved.
763 *
764 * @returns true if normal.
765 * @returns false if invalid, ROM, MMIO or reserved page.
766 * @param pVM The VM handle.
767 * @param GCPhys The physical address to check.
768 */
769PGMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys);
770
771/**
772 * Converts a GC physical address to a HC physical address.
773 *
774 * @returns VINF_SUCCESS on success.
775 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
776 * page but has no physical backing.
777 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
778 * GC physical address.
779 * @param pVM The VM handle.
780 * @param GCPhys The GC physical address to convert.
781 * @param pHCPhys Where to store the HC physical address on success.
782 */
783PGMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys);
784
785/**
786 * Converts a guest pointer to a GC physical address.
787 *
788 * This uses the current CR3/CR0/CR4 of the guest.
789 *
790 * @returns VBox status code.
791 * @param pVM The VM Handle
792 * @param GCPtr The guest pointer to convert.
793 * @param pGCPhys Where to store the GC physical address.
794 */
795PGMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys);
796
797/**
798 * Converts a guest pointer to a HC physical address.
799 *
800 * This uses the current CR3/CR0/CR4 of the guest.
801 *
802 * @returns VBox status code.
803 * @param pVM The VM Handle
804 * @param GCPtr The guest pointer to convert.
805 * @param pHCPhys Where to store the HC physical address.
806 */
807PGMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys);
808
809
810/**
811 * Invalidates the GC page mapping TLB.
812 *
813 * @param pVM The VM handle.
814 */
815PDMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM);
816
817/**
818 * Invalidates the ring-0 page mapping TLB.
819 *
820 * @param pVM The VM handle.
821 */
822PDMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM);
823
824/**
825 * Invalidates the ring-3 page mapping TLB.
826 *
827 * @param pVM The VM handle.
828 */
829PDMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM);
830
831/**
832 * Page mapping lock.
833 *
834 * @remarks This doesn't work in structures shared between
835 * ring-3, ring-0 and/or GC.
836 */
837typedef struct PGMPAGEMAPLOCK
838{
839 /** @todo see PGMPhysIsPageMappingLockValid for possibly incorrect assumptions */
840#ifdef IN_GC
841 /** Just a dummy for the time being. */
842 uint32_t u32Dummy;
843#else
844 /** Pointer to the PGMPAGE. */
845 void *pvPage;
846 /** Pointer to the PGMCHUNKR3MAP. */
847 void *pvMap;
848#endif
849} PGMPAGEMAPLOCK;
850/** Pointer to a page mapping lock. */
851typedef PGMPAGEMAPLOCK *PPGMPAGEMAPLOCK;
852
853/**
854 * Requests the mapping of a guest page into the current context.
855 *
856 * This API should only be used for very short periods, as it will consume
857 * scarce resources (R0 and GC) in the mapping cache. When you're done
858 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
859 *
860 * This API will assume your intention is to write to the page, and will
861 * therefore replace shared and zero pages. If you do not intend to modify
862 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
863 *
864 * @returns VBox status code.
865 * @retval VINF_SUCCESS on success.
866 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
867 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
868 *
869 * @param pVM The VM handle.
870 * @param GCPhys The guest physical address of the page that should be mapped.
871 * @param ppv Where to store the address corresponding to GCPhys.
872 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
873 *
874 * @remark Avoid calling this API from within critical sections (other than
875 * the PGM one) because of the deadlock risk.
876 * @thread Any thread.
877 */
878PGMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock);
879
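/*
 * Illustrative sketch (not part of the original header): the intended
 * map / modify / release pattern for the page mapping lock API above.
 * The helper name is hypothetical and error handling is reduced to
 * passing the status code up.
 */
#if 0 /* example only */
static int pgmExamplePokeGuestByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    PGMPAGEMAPLOCK Lock;
    void          *pv;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        /* Keep the mapping for as short a time as possible. */
        *(uint8_t *)pv = bValue;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif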
880/**
881 * Requests the mapping of a guest page into the current context.
882 *
883 * This API should only be used for very short periods, as it will consume
884 * scarce resources (R0 and GC) in the mapping cache. When you're done
885 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
886 *
887 * @returns VBox status code.
888 * @retval VINF_SUCCESS on success.
889 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
890 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
891 *
892 * @param pVM The VM handle.
893 * @param GCPhys The guest physical address of the page that should be mapped.
894 * @param ppv Where to store the address corresponding to GCPhys.
895 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
896 *
897 * @remark Avoid calling this API from within critical sections (other than
898 * the PGM one) because of the deadlock risk.
899 * @thread Any thread.
900 */
901PGMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void * const *ppv, PPGMPAGEMAPLOCK pLock);
902
903/**
904 * Requests the mapping of a guest page given by virtual address into the current context.
905 *
906 * This API should only be used for very short periods, as it will consume
907 * scarce resources (R0 and GC) in the mapping cache. When you're done
908 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
909 *
910 * This API will assume your intention is to write to the page, and will
911 * therefore replace shared and zero pages. If you do not intend to modify
912 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
913 *
914 * @returns VBox status code.
915 * @retval VINF_SUCCESS on success.
916 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
917 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
918 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
919 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
920 *
921 * @param pVM The VM handle.
922 * @param GCPtr The guest virtual address of the page that should be mapped.
923 * @param ppv Where to store the address corresponding to GCPtr.
924 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
925 *
926 * @remark Avoid calling this API from within critical sections (other than
927 * the PGM one) because of the deadlock risk.
928 * @thread EMT
929 */
930PGMDECL(int) PGMPhysGCPtr2CCPtr(PVM pVM, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock);
931
932/**
933 * Requests the mapping of a guest page given by virtual address into the current context.
934 *
935 * This API should only be used for very short periods, as it will consume
936 * scarce resources (R0 and GC) in the mapping cache. When you're done
937 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
938 *
939 * @returns VBox status code.
940 * @retval VINF_SUCCESS on success.
941 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
942 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
943 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
944 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
945 *
946 * @param pVM The VM handle.
947 * @param GCPtr The guest virtual address of the page that should be mapped.
948 * @param ppv Where to store the address corresponding to GCPtr.
949 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
950 *
951 * @remark Avoid calling this API from within critical sections (other than
952 * the PGM one) because of the deadlock risk.
953 * @thread EMT
954 */
955PGMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVM pVM, RTGCPTR GCPtr, void * const *ppv, PPGMPAGEMAPLOCK pLock);
956
957/**
958 * Release the mapping of a guest page.
959 *
960 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
961 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
962 *
963 * @param pVM The VM handle.
964 * @param pLock The lock structure initialized by the mapping function.
965 */
966PGMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock);
967
968
969/**
970 * Checks if the lock structure is valid
971 *
972 * @param pVM The VM handle.
973 * @param pLock The lock structure initialized by the mapping function.
974 */
975DECLINLINE(bool) PGMPhysIsPageMappingLockValid(PVM pVM, PPGMPAGEMAPLOCK pLock)
976{
977 /** @todo -> complete/change this */
978#ifdef IN_GC
979 return !!(pLock->u32Dummy);
980#else
981 return !!(pLock->pvPage);
982#endif
983}
984
985/**
986 * Converts a GC physical address to a HC pointer.
987 *
988 * @returns VINF_SUCCESS on success.
989 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
990 * page but has no physical backing.
991 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
992 * GC physical address.
993 * @param pVM The VM handle.
994 * @param GCPhys The GC physical address to convert.
995 * @param cbRange The size of the physical range in bytes.
996 * @param pHCPtr Where to store the HC pointer on success.
997 *
998 * @remark Do *not* assume this mapping will be around forever!
999 */
1000PGMDECL(int) PGMPhysGCPhys2HCPtr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTHCPTR pHCPtr);
1001
1002/**
1003 * Converts a guest pointer to a HC pointer.
1004 *
1005 * This uses the current CR3/CR0/CR4 of the guest.
1006 *
1007 * @returns VBox status code.
1008 * @param pVM The VM Handle
1009 * @param GCPtr The guest pointer to convert.
1010 * @param pHCPtr Where to store the HC virtual address.
1011 *
1012 * @remark Do *not* assume this mapping will be around forever!
1013 */
1014PGMDECL(int) PGMPhysGCPtr2HCPtr(PVM pVM, RTGCPTR GCPtr, PRTHCPTR pHCPtr);
1015
1016/**
1017 * Converts a guest virtual address to a HC pointer using the specified CR3 and flags.
1018 *
1019 * @returns VBox status code.
1020 * @param pVM The VM Handle
1021 * @param GCPtr The guest pointer to convert.
1022 * @param cr3 The guest CR3.
1023 * @param fFlags Flags used for interpreting the PD correctly: X86_CR4_PSE and X86_CR4_PAE
1024 * @param pHCPtr Where to store the HC pointer.
1025 *
1026 * @remark Do *not* assume this mapping will be around forever!
1027 * @remark This function is used by the REM at a time when PGM could
1028 * potentially not be in sync. It could also be used by a
1029 * future DBGF API for CPU-state independent conversions.
1030 */
1031PGMDECL(int) PGMPhysGCPtr2HCPtrByGstCR3(PVM pVM, RTGCPTR GCPtr, uint32_t cr3, unsigned fFlags, PRTHCPTR pHCPtr);
1032
1033/**
1034 * Read physical memory.
1035 *
1036 * This API respects access handlers and MMIO. Use PGMPhysReadGCPhys() if you
1037 * want to ignore those.
1038 *
1039 * @param pVM VM Handle.
1040 * @param GCPhys Physical address to start reading from.
1041 * @param pvBuf Where to put the read bits.
1042 * @param cbRead How many bytes to read.
1043 */
1044PGMDECL(void) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead);
1045
1046/**
1047 * Write to physical memory.
1048 *
1049 * This API respects access handlers and MMIO. Use PGMPhysWriteGCPhys() if you
1050 * want to ignore those.
1051 *
1052 * @param pVM VM Handle.
1053 * @param GCPhys Physical address to write to.
1054 * @param pvBuf What to write.
1055 * @param cbWrite How many bytes to write.
1056 */
1057PGMDECL(void) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite);
1058
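/*
 * Illustrative sketch (not part of the original header): a handler-aware
 * read-modify-write of a 32-bit word in guest physical memory using the
 * two APIs above. The helper name is hypothetical; the word is assumed
 * not to straddle anything interesting like an MMIO boundary.
 */
#if 0 /* example only */
static void pgmExampleSetBitsInGuestU32(PVM pVM, RTGCPHYS GCPhys, uint32_t fSetMask)
{
    uint32_t u32;
    PGMPhysRead(pVM, GCPhys, &u32, sizeof(u32));    /* respects access handlers and MMIO */
    u32 |= fSetMask;
    PGMPhysWrite(pVM, GCPhys, &u32, sizeof(u32));
}
#endif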
1059
1060#ifndef IN_GC /* Only ring 0 & 3. */
1061
1062/**
1063 * Read from guest physical memory by GC physical address, bypassing
1064 * MMIO and access handlers.
1065 *
1066 * @returns VBox status.
1067 * @param pVM VM handle.
1068 * @param pvDst The destination address.
1069 * @param GCPhysSrc The source address (GC physical address).
1070 * @param cb The number of bytes to read.
1071 */
1072PGMDECL(int) PGMPhysReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb);
1073
1074/**
1075 * Write to guest physical memory by GC physical address, i.e. write
1076 * memory to a GC physical address in guest physical memory.
1077 *
1078 * This will bypass MMIO and access handlers.
1079 *
1080 * @returns VBox status.
1081 * @param pVM VM handle.
1082 * @param GCPhysDst The GC physical address of the destination.
1083 * @param pvSrc The source buffer.
1084 * @param cb The number of bytes to write.
1085 */
1086PGMDECL(int) PGMPhysWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb);
1087
1088/**
1089 * Read from guest physical memory referenced by GC pointer.
1090 *
1091 * This function uses the current CR3/CR0/CR4 of the guest and will
1092 * bypass access handlers and not set any accessed bits.
1093 *
1094 * @returns VBox status.
1095 * @param pVM VM handle.
1096 * @param pvDst The destination address.
1097 * @param GCPtrSrc The source address (GC pointer).
1098 * @param cb The number of bytes to read.
1099 */
1100PGMDECL(int) PGMPhysReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb);
1101
1102/**
1103 * Write to guest physical memory referenced by GC pointer.
1104 *
1105 * This function uses the current CR3/CR0/CR4 of the guest and will
1106 * bypass access handlers and not set dirty or accessed bits.
1107 *
1108 * @returns VBox status.
1109 * @param pVM VM handle.
1110 * @param GCPtrDst The destination address (GC pointer).
1111 * @param pvSrc The source address.
1112 * @param cb The number of bytes to write.
1113 */
1114PGMDECL(int) PGMPhysWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);
1115
1116/**
1117 * Read from guest physical memory referenced by GC pointer.
1118 *
1119 * This function uses the current CR3/CR0/CR4 of the guest and will
1120 * respect access handlers and set accessed bits.
1121 *
1122 * @returns VBox status.
1123 * @param pVM VM handle.
1124 * @param pvDst The destination address.
1125 * @param GCPtrSrc The source address (GC pointer).
1126 * @param cb The number of bytes to read.
1127 */
1128PGMDECL(int) PGMPhysReadGCPtrSafe(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb);
1129
1130/**
1131 * Write to guest physical memory referenced by GC pointer.
1132 *
1133 * This function uses the current CR3/CR0/CR4 of the guest and will
1134 * respect access handlers and set dirty and accessed bits.
1135 *
1136 * @returns VBox status.
1137 * @param pVM VM handle.
1138 * @param GCPtrDst The destination address (GC pointer).
1139 * @param pvSrc The source address.
1140 * @param cb The number of bytes to write.
1141 */
1142PGMDECL(int) PGMPhysWriteGCPtrSafe(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);
1143
1144/**
1145 * Write to guest physical memory referenced by GC pointer and update the PTE.
1146 *
1147 * This function uses the current CR3/CR0/CR4 of the guest and will
1148 * bypass access handlers and set any dirty and accessed bits in the PTE.
1149 *
1150 * If you don't want to set the dirty bit, use PGMR3PhysWriteGCPtr().
1151 *
1152 * @returns VBox status.
1153 * @param pVM VM handle.
1154 * @param GCPtrDst The destination address (GC pointer).
1155 * @param pvSrc The source address.
1156 * @param cb The number of bytes to write.
1157 */
1158PGMDECL(int) PGMPhysWriteGCPtrDirty(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);
1159
1160/**
1161 * Emulation of the invlpg instruction (HC only actually).
1162 *
1163 * @returns VBox status code.
1164 * @param pVM VM handle.
1165 * @param GCPtrPage Page to invalidate.
1166 * @remark ASSUMES the page table entry or page directory is
1167 * valid. Fairly safe, but there could be edge cases!
1168 * @todo Flush page or page directory only if necessary!
1169 */
1170PGMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage);
1171
1172#endif /* !IN_GC */
1173
1174/**
1175 * Performs a read of guest virtual memory for instruction emulation.
1176 *
1177 * This will check permissions, raise exceptions and update the access bits.
1178 *
1179 * The current implementation will bypass all access handlers. It may later be
1180 * changed to at least respect MMIO.
1181 *
1182 *
1183 * @returns VBox status code suitable for scheduling.
1184 * @retval VINF_SUCCESS if the read was performed successfully.
1185 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
1186 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
1187 *
1188 * @param pVM The VM handle.
1189 * @param pCtxCore The context core.
1190 * @param pvDst Where to put the bytes we've read.
1191 * @param GCPtrSrc The source address.
1192 * @param cb The number of bytes to read. Not more than a page.
1193 *
1194 * @remark This function will dynamically map physical pages in GC. This may unmap
1195 * mappings done by the caller. Be careful!
1196 */
1197PGMDECL(int) PGMPhysInterpretedRead(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb);
1198
1199#ifdef VBOX_STRICT
1200/**
1201 * Asserts that the handlers+guest-page-tables == ramrange-flags and
1202 * that the physical addresses associated with virtual handlers are correct.
1203 *
1204 * @returns Number of mismatches.
1205 * @param pVM The VM handle.
1206 */
1207PGMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVM pVM);
1208
1209/**
1210 * Asserts that there are no mapping conflicts.
1211 *
1212 * @returns Number of conflicts.
1213 * @param pVM The VM Handle.
1214 */
1215PGMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM);
1216
1217/**
1218 * Asserts that everything related to the guest CR3 is correctly shadowed.
1219 *
1220 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
1221 * and assert the correctness of the guest CR3 mapping before asserting that the
1222 * shadow page tables are in sync with the guest page tables.
1223 *
1224 * @returns Number of conflicts.
1225 * @param pVM The VM Handle.
1226 * @param cr3 The current guest CR3 register value.
1227 * @param cr4 The current guest CR4 register value.
1228 */
1229PGMDECL(unsigned) PGMAssertCR3(PVM pVM, uint32_t cr3, uint32_t cr4);
1230#endif /* VBOX_STRICT */
1231
1232
1233#ifdef IN_GC
1234
1235/** @defgroup grp_pgm_gc The PGM Guest Context API
1236 * @ingroup grp_pgm
1237 * @{
1238 */
1239
1240/**
1241 * Temporarily maps one guest page specified by GC physical address.
1242 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
1243 *
1244 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1245 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1246 *
1247 * @returns VBox status.
1248 * @param pVM VM handle.
1249 * @param GCPhys GC Physical address of the page.
1250 * @param ppv Where to store the address of the mapping.
1251 */
1252PGMGCDECL(int) PGMGCDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv);
1253
1254/**
1255 * Temporarily maps one guest page specified by unaligned GC physical address.
1256 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
1257 *
1258 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1259 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1260 *
1261 * The caller is aware that only the specified page is mapped and that really bad things
1262 * will happen if writing beyond the page!
1263 *
1264 * @returns VBox status.
1265 * @param pVM VM handle.
1266 * @param GCPhys GC Physical address within the page to be mapped.
1267 * @param ppv Where to store the address of the mapping address corresponding to GCPhys.
1268 */
1269PGMGCDECL(int) PGMGCDynMapGCPageEx(PVM pVM, RTGCPHYS GCPhys, void **ppv);
1270
1271/**
1272 * Temporarily maps one host page specified by HC physical address.
1273 *
1274 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1275 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1276 *
1277 * @returns VBox status.
1278 * @param pVM VM handle.
1279 * @param HCPhys HC Physical address of the page.
1280 * @param ppv Where to store the address of the mapping.
1281 */
1282PGMGCDECL(int) PGMGCDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv);
1283
1284/**
1285 * Syncs a guest OS page table.
1286 *
1287 * @returns VBox status code.
1288 * @param pVM VM handle.
1289 * @param iPD Page directory index.
1290 * @param pPDSrc Source page directory (i.e. Guest OS page directory).
1291 * Assume this is a temporary mapping.
1292 */
1293PGMGCDECL(int) PGMGCSyncPT(PVM pVM, unsigned iPD, PVBOXPD pPDSrc);
1294
1295/**
1296 * Emulation of the invlpg instruction.
1297 *
1298 * @returns VBox status code.
1299 * @param pVM VM handle.
1300 * @param GCPtrPage Page to invalidate.
1301 */
1302PGMGCDECL(int) PGMGCInvalidatePage(PVM pVM, RTGCPTR GCPtrPage);
1303
1304/** @} */
1305#endif
1306
1307
1308#ifdef IN_RING0
1309/** @defgroup grp_pgm_r0 The PGM Host Context Ring-0 API
1310 * @ingroup grp_pgm
1311 * @{
1312 */
1313/**
1314 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
1315 *
1316 * @returns The following VBox status codes.
1317 * @retval VINF_SUCCESS on success. FF cleared.
1318 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
1319 *
1320 * @param pVM The VM handle.
1321 *
1322 * @remarks Must be called from within the PGM critical section.
1323 */
1324PGMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM);
1325
1326/** @} */
1327#endif
1328
1329
1330
1331#ifdef IN_RING3
1332/** @defgroup grp_pgm_r3 The PGM Host Context Ring-3 API
1333 * @ingroup grp_pgm
1334 * @{
1335 */
1336/**
1337 * Initializes the paging of the VM.
1338 *
1339 * @returns VBox status code.
1340 * @param pVM Pointer to VM structure.
1341 */
1342PGMR3DECL(int) PGMR3Init(PVM pVM);
1343
1344/**
1345 * Init the PGM bits that rely on VMMR0 and MM to be fully initialized.
1346 *
1347 * The dynamic mapping area will also be allocated and initialized at this
1348 * time. We could allocate it during PGMR3Init of course, but the mapping
1349 * wouldn't be allocated at that time preventing us from setting up the
1350 * page table entries with the dummy page.
1351 *
1352 * @returns VBox status code.
1353 * @param pVM VM handle.
1354 */
1355PGMR3DECL(int) PGMR3InitDynMap(PVM pVM);
1356
1357/**
1358 * Ring-3 init finalizing.
1359 *
1360 * @returns VBox status code.
1361 * @param pVM The VM handle.
1362 */
1363PGMR3DECL(int) PGMR3InitFinalize(PVM pVM);
1364
1365/**
1366 * Applies relocations to data and code managed by this
1367 * component. This function will be called at init and
1368 * whenever the VMM needs to relocate itself inside the GC.
1369 *
1370 * @param pVM The VM.
1371 * @param offDelta Relocation delta relative to old location.
1372 */
1373PGMR3DECL(void) PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta);
1374
1375/**
1376 * The VM is being reset.
1377 *
1378 * For the PGM component this means that any PD write monitors
1379 * need to be removed.
1380 *
1381 * @param pVM VM handle.
1382 */
1383PGMR3DECL(void) PGMR3Reset(PVM pVM);
1384
1385/**
1386 * Terminates the PGM.
1387 *
1388 * @returns VBox status code.
1389 * @param pVM Pointer to VM structure.
1390 */
1391PGMR3DECL(int) PGMR3Term(PVM pVM);
1392
1393/**
1394 * Service a VMMCALLHOST_PGM_LOCK call.
1395 *
1396 * @returns VBox status code.
1397 * @param pVM The VM handle.
1398 */
1399PDMR3DECL(int) PGMR3LockCall(PVM pVM);
1400
1401/**
1402 * Inform PGM whether we want all mappings to be put into the shadow page table (necessary for e.g. VMX).
1403 *
1404 * @returns VBox status code.
1405 * @param pVM VM handle.
1406 * @param fEnable Enable or disable shadow mappings
1407 */
1408PGMR3DECL(int) PGMR3ChangeShwPDMappings(PVM pVM, bool fEnable);
1409
1410/**
1411 * Allocate missing physical pages for an existing guest RAM range.
1412 *
1413 * @returns VBox status.
1414 * @param pVM The VM handle.
1415 * @param GCPhys GC physical address of the RAM range. (page aligned)
1416 */
1417PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);
1418
1419/**
1420 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
1421 * registration APIs call to inform PGM about memory registrations.
1422 *
1423 * It registers the physical memory range with PGM. MM is responsible
1424 * for the toplevel things - allocation and locking - while PGM is taking
1425 * care of all the details and implements the physical address space virtualization.
1426 *
1427 * @returns VBox status.
1428 * @param pVM The VM handle.
1429 * @param pvRam HC virtual address of the RAM range. (page aligned)
1430 * @param GCPhys GC physical address of the RAM range. (page aligned)
1431 * @param cb Size of the RAM range. (page aligned)
1432 * @param fFlags Flags, MM_RAM_*.
1433 * @param paPages Pointer to an array of physical page descriptors.
1434 * @param pszDesc Description string.
1435 */
1436PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc);
1437
1438/**
1439 * Register a chunk of the physical memory range with PGM. MM is responsible
1440 * for the toplevel things - allocation and locking - while PGM is taking
1441 * care of all the details and implements the physical address space virtualization.
1442 *
1443 * @returns VBox status.
1444 * @param pVM The VM handle.
1445 * @param pvRam HC virtual address of the RAM range. (page aligned)
1446 * @param GCPhys GC physical address of the RAM range. (page aligned)
1447 * @param cb Size of the RAM range. (page aligned)
1448 * @param fFlags Flags, MM_RAM_*.
1449 * @param paPages Pointer to an array of physical page descriptors.
1450 * @param pszDesc Description string.
1451 */
1452PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc);
1453
1454/**
1455 * Interface MMIO handler relocation calls.
1456 *
1457 * It relocates an existing physical memory range with PGM.
1458 *
1459 * @returns VBox status.
1460 * @param pVM The VM handle.
1461 * @param GCPhysOld Previous GC physical address of the RAM range. (page aligned)
1462 * @param GCPhysNew New GC physical address of the RAM range. (page aligned)
1463 * @param cb Size of the RAM range. (page aligned)
1464 */
1465PGMR3DECL(int) PGMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, size_t cb);
1466
1467/**
1468 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
1469 * flags of existing RAM ranges.
1470 *
1471 * @returns VBox status.
1472 * @param pVM The VM handle.
1473 * @param GCPhys GC physical address of the RAM range. (page aligned)
1474 * @param cb Size of the RAM range. (page aligned)
1475 * @param fFlags The OR flags, MM_RAM_* #defines.
1476 * @param fMask The AND mask for the flags.
1477 */
1478PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask);
1479
1480/**
1481 * Sets the Address Gate 20 state.
1482 *
1483 * @param pVM VM handle.
1484 * @param fEnable True if the gate should be enabled.
1485 * False if the gate should be disabled.
1486 */
1487PGMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable);
1488
1489/**
1490 * Creates a page table based mapping in GC.
1491 *
1492 * @returns VBox status code.
1493 * @param pVM VM Handle.
1494 * @param GCPtr Virtual Address. (Page table aligned!)
1495 * @param cb Size of the range. Must be 4MB aligned!
1496 * @param pfnRelocate Relocation callback function.
1497 * @param pvUser User argument to the callback.
1498 * @param pszDesc Pointer to description string. This must not be freed.
1499 */
1500PGMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, size_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc);
1501
1502/**
1503 * Removes a page table based mapping.
1504 *
1505 * @returns VBox status code.
1506 * @param pVM VM Handle.
1507 * @param GCPtr Virtual Address. (Page table aligned!)
1508 */
1509PGMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr);
1510
1511/**
1512 * Gets the size of the current guest mappings if they were to be
1513 * put next to one another.
1514 *
1515 * @returns VBox status code.
1516 * @param pVM The VM.
1517 * @param pcb Where to store the size.
1518 */
1519PGMR3DECL(int) PGMR3MappingsSize(PVM pVM, size_t *pcb);
1520
1521/**
1522 * Fixes the guest context mappings in a range reserved from the Guest OS.
1523 *
1524 * @returns VBox status code.
1525 * @param pVM The VM.
1526 * @param GCPtrBase The address of the reserved range of guest memory.
1527 * @param cb The size of the range starting at GCPtrBase.
1528 */
1529PGMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, size_t cb);
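
/* Illustrative sketch (not from the original sources): the usual sequence is
 * to ask for the total size of the mappings and then fix them into a range
 * the guest has reserved for the VMM; GCPtrReservedBase is an assumption.
 *
 *      size_t cb;
 *      rc = PGMR3MappingsSize(pVM, &cb);
 *      if (rc == VINF_SUCCESS)
 *          rc = PGMR3MappingsFix(pVM, GCPtrReservedBase, cb);
 */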
1530
1531/**
1532 * Unfixes the mappings.
1533 * After calling this function mapping conflict detection will be enabled.
1534 *
1535 * @returns VBox status code.
1536 * @param pVM The VM.
1537 */
1538PGMR3DECL(int) PGMR3MappingsUnfix(PVM pVM);
1539
1540/**
1541 * Map pages into the intermediate context (switcher code).
1542 * These pages are mapped both at the given virtual address and at
1543 * the physical address (for identity mapping).
1544 *
1545 * @returns VBox status code.
1546 * @param pVM The virtual machine.
1547 * @param Addr Intermediate context address of the mapping.
1548 * @param HCPhys Start of the range of physical pages. This must be entirely below 4GB!
1549 * @param cbPages Number of bytes to map.
1550 *
1551 * @remark This API shall not be used for anything but mapping the switcher code.
1552 */
1553PGMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages);
1554
1555/**
1556 * Checks guest PD for conflicts with VMM GC mappings.
1557 *
1558 * @returns true if conflict detected.
1559 * @returns false if not.
1560 * @param pVM The virtual machine.
1561 * @param cr3 Guest context CR3 register.
1562 * @param fRawR0 Whether RawR0 is enabled or not.
1563 */
1564PGMR3DECL(bool) PGMR3MapHasConflicts(PVM pVM, uint32_t cr3, bool fRawR0);
1565
1566/**
1567 * Read memory from the guest mappings.
1568 *
1569 * This will use the page tables associated with the mappings to
1570 * read the memory. This means that not all kinds of memory are readable
1571 * since we don't necessarily know how to convert that physical address
1572 * to a HC virtual one.
1573 *
1574 * @returns VBox status.
1575 * @param pVM VM handle.
1576 * @param pvDst The destination address (HC of course).
1577 * @param GCPtrSrc The source address (GC virtual address).
1578 * @param cb Number of bytes to read.
1579 */
1580PGMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb);
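
/* Illustrative sketch (not from the original sources): copying a structure
 * out of one of the VMM GC mappings into a host buffer. MYDATA and
 * GCPtrVmmData are placeholders.
 *
 *      MYDATA Data;
 *      rc = PGMR3MapRead(pVM, &Data, GCPtrVmmData, sizeof(Data));
 */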
1581
1582/**
1583 * Register an access handler for a physical range.
1584 *
1585 * @returns VBox status code.
1586 * @param pVM VM handle.
1587 * @param enmType Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
1588 * @param GCPhys Start physical address.
1589 * @param GCPhysLast Last physical address. (inclusive)
1590 * @param pfnHandlerR3 The R3 handler.
1591 * @param pvUserR3 User argument to the R3 handler.
1592 * @param pszModR0 The R0 handler module. NULL means default R0 module.
1593 * @param pszHandlerR0 The R0 handler symbol name.
1594 * @param pvUserR0 User argument to the R0 handler.
1595 * @param pszModGC The GC handler module. NULL means default GC module.
1596 * @param pszHandlerGC The GC handler symbol name.
1597 * @param pvUserGC User argument to the GC handler.
1598 * This must be a GC pointer because it will be relocated!
1599 * @param pszDesc Pointer to description string. This must not be freed.
1600 */
1601PGMR3DECL(int) PGMR3HandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
1602 PFNPGMR3PHYSHANDLER pfnHandlerR3, void *pvUserR3,
1603 const char *pszModR0, const char *pszHandlerR0, RTR0PTR pvUserR0,
1604 const char *pszModGC, const char *pszHandlerGC, RTGCPTR pvUserGC, const char *pszDesc);
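
/* Illustrative sketch (not from the original sources): registering a ring-3
 * only write handler for a device region. The handler and user argument are
 * placeholders, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE is assumed to be one of
 * the PGMPHYSHANDLERTYPE_PHYSICAL* values, and the R0/GC parts are simply
 * omitted.
 *
 *      rc = PGMR3HandlerPhysicalRegister(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
 *                                        GCPhysStart, GCPhysStart + cbRegion - 1,
 *                                        myR3WriteHandler, pThis,
 *                                        NULL, NULL, 0,
 *                                        NULL, NULL, 0, "MYDEV registers");
 */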
1605
1606/**
1607 * Register an access handler for a virtual range.
1608 *
1609 * @returns VBox status code.
1610 * @param pVM VM handle.
1611 * @param enmType Handler type. Any of the PGMVIRTHANDLERTYPE_* enums.
1612 * @param GCPtr Start address.
1613 * @param GCPtrLast Last address. (inclusive)
1614 * @param pfnInvalidateHC The HC invalidate callback (can be 0)
1615 * @param pfnHandlerHC The HC handler.
1616 * @param pfnHandlerGC The GC handler.
1617 * @param pszDesc Pointer to description string. This must not be freed.
1618 */
1619PGMDECL(int) PGMHandlerVirtualRegisterEx(PVM pVM, PGMVIRTHANDLERTYPE enmType, RTGCPTR GCPtr, RTGCPTR GCPtrLast,
1620 PFNPGMHCVIRTINVALIDATE pfnInvalidateHC,
1621 PFNPGMHCVIRTHANDLER pfnHandlerHC, RTGCPTR pfnHandlerGC,
1622 R3PTRTYPE(const char *) pszDesc);
1623
1624/**
1625 * Register an access handler for a virtual range.
1626 *
1627 * @returns VBox status code.
1628 * @param pVM VM handle.
1629 * @param enmType Handler type. Any of the PGMVIRTHANDLERTYPE_* enums.
1630 * @param GCPtr Start address.
1631 * @param GCPtrLast Last address. (inclusive)
1632 * @param pfnInvalidateHC The HC invalidate callback (can be 0)
1633 * @param pfnHandlerHC The HC handler.
1634 * @param pszHandlerGC The GC handler symbol name.
1635 * @param pszModGC The GC handler module.
1636 * @param pszDesc Pointer to description string. This must not be freed.
1637 */
1638PGMR3DECL(int) PGMR3HandlerVirtualRegister(PVM pVM, PGMVIRTHANDLERTYPE enmType, RTGCPTR GCPtr, RTGCPTR GCPtrLast,
1639 PFNPGMHCVIRTINVALIDATE pfnInvalidateHC,
1640 PFNPGMHCVIRTHANDLER pfnHandlerHC,
1641 const char *pszHandlerGC, const char *pszModGC, const char *pszDesc);
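
/* Illustrative sketch (not from the original sources): registering a write
 * handler over a single guest page, resolving the GC handler by symbol name
 * in the default GC module. All names are placeholders and
 * PGMVIRTHANDLERTYPE_WRITE is an assumption about the enum's spelling.
 *
 *      rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE,
 *                                       GCPtrPage, GCPtrPage + 0xfff,
 *                                       NULL, myHCWriteHandler,
 *                                       "myGCWriteHandler", NULL,
 *                                       "My page monitor");
 */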
1642
1643/**
1644 * Modify the page invalidation callback handler for a registered virtual range
1645 * (add more when needed)
1646 *
1647 * @returns VBox status code.
1648 * @param pVM VM handle.
1649 * @param GCPtr Start address.
1650 * @param pfnInvalidateHC The HC invalidate callback (can be 0)
1651 */
1652PGMDECL(int) PGMHandlerVirtualChangeInvalidateCallback(PVM pVM, RTGCPTR GCPtr, PFNPGMHCVIRTINVALIDATE pfnInvalidateHC);
1653
1654
1655/**
1656 * Deregister an access handler for a virtual range.
1657 *
1658 * @returns VBox status code.
1659 * @param pVM VM handle.
1660 * @param GCPtr Start address.
1661 */
1662PGMDECL(int) PGMHandlerVirtualDeregister(PVM pVM, RTGCPTR GCPtr);
1663
1664/**
1665 * Grows the shadow page pool.
1666 *
1667 * I.e. adds more pages to it, assuming it hasn't reached cMaxPages yet.
1668 *
1669 * @returns VBox status code.
1670 * @param pVM The VM handle.
1671 */
1672PDMR3DECL(int) PGMR3PoolGrow(PVM pVM);
1673
1674#ifdef ___VBox_dbgf_h /** @todo fix this! */
1675/**
1676 * Dumps a page table hierarchy using only physical addresses and cr4/lm flags.
1677 *
1678 * @returns VBox status code (VINF_SUCCESS).
1679 * @param pVM The VM handle.
1680 * @param cr3 The root of the hierarchy.
1681 * @param cr4 The cr4, only PAE and PSE are currently used.
1682 * @param fLongMode Set if long mode, false if not long mode.
1683 * @param cMaxDepth Number of levels to dump.
1684 * @param pHlp Pointer to the output functions.
1685 */
1686PGMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp);
1687#endif
1688
1689/**
1690 * Dumps a 32-bit guest page directory and page tables.
1691 *
1692 * @returns VBox status code (VINF_SUCCESS).
1693 * @param pVM The VM handle.
1694 * @param cr3 The root of the hierarchy.
1695 * @param cr4 The CR4, PSE is currently used.
1696 * @param PhysSearch Address to search for.
1697 */
1698PGMR3DECL(int) PGMR3DumpHierarchyGC(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCPHYS PhysSearch);
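
/* Illustrative sketch (not from the original sources): dumping the current
 * guest hierarchy while searching for a physical address, assuming the
 * CPUMGetGuestCR3/CPUMGetGuestCR4 accessors from the included cpum.h are
 * the right way to fetch the control registers.
 *
 *      rc = PGMR3DumpHierarchyGC(pVM, CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM),
 *                                GCPhysSearch);
 */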
1699
1700/**
1701 * Debug helper - Dumps the supplied page directory.
1702 *
1703 * @internal
1704 */
1705PGMR3DECL(void) PGMR3DumpPD(PVM pVM, PVBOXPD pPD);
1706
1707/**
1708 * Dumps the PGM mappings.
1709 *
1710 * @param pVM VM handle.
1711 */
1712PGMR3DECL(void) PGMR3DumpMappings(PVM pVM);
1713
1714/** @todo r=bird: s/Byte/U8/ s/Word/U16/ s/Dword/U32/ to match other functions names and returned types. */
1715/**
1716 * Read physical memory. (one byte)
1717 *
1718 * This API respects access handlers and MMIO. Use PGMPhysReadGCPhys() if you
1719 * want to ignore those.
1720 *
1721 * @param pVM VM Handle.
1722 * @param GCPhys Physical address start reading from.
1723 */
1724PGMR3DECL(uint8_t) PGMR3PhysReadByte(PVM pVM, RTGCPHYS GCPhys);
1725
1726/**
1727 * Read physical memory. (one word)
1728 *
1729 * This API respects access handlers and MMIO. Use PGMPhysReadGCPhys() if you
1730 * want to ignore those.
1731 *
1732 * @param pVM VM Handle.
1733 * @param GCPhys Physical address start reading from.
1734 */
1735PGMR3DECL(uint16_t) PGMR3PhysReadWord(PVM pVM, RTGCPHYS GCPhys);
1736
1737/**
1738 * Read physical memory. (one dword)
1739 *
1740 * This API respects access handlers and MMIO. Use PGMPhysReadGCPhys() if you
1741 * want to ignore those.
1742 *
1743 * @param pVM VM Handle.
1744 * @param GCPhys Physical address start reading from.
1745 */
1746PGMR3DECL(uint32_t) PGMR3PhysReadDword(PVM pVM, RTGCPHYS GCPhys);
1747
1748/**
1749 * Write to physical memory. (one byte)
1750 *
1751 * This API respects access handlers and MMIO. Use PGMPhysWriteGCPhys() if you
1752 * want to ignore those.
1753 *
1754 * @param pVM VM Handle.
1755 * @param GCPhys Physical address to write to.
1756 * @param val What to write.
1757 */
1758PGMR3DECL(void) PGMR3PhysWriteByte(PVM pVM, RTGCPHYS GCPhys, uint8_t val);
1759
1760/**
1761 * Write to physical memory. (one word)
1762 *
1763 * This API respects access handlers and MMIO. Use PGMPhysWriteGCPhys() if you
1764 * want to ignore those.
1765 *
1766 * @param pVM VM Handle.
1767 * @param GCPhys Physical address to write to.
1768 * @param val What to write.
1769 */
1770PGMR3DECL(void) PGMR3PhysWriteWord(PVM pVM, RTGCPHYS GCPhys, uint16_t val);
1771
1772/**
1773 * Write to physical memory. (one dword)
1774 *
1775 * This API respects access handlers and MMIO. Use PGMPhysWriteGCPhys() if you
1776 * want to ignore those.
1777 *
1778 * @param pVM VM Handle.
1779 * @param GCPhys Physical address to write to.
1780 * @param val What to write.
1781 */
1782PGMR3DECL(void) PGMR3PhysWriteDword(PVM pVM, RTGCPHYS GCPhys, uint32_t val);
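
/* Illustrative sketch (not from the original sources): a read-modify-write of
 * a guest physical dword using the accessors above; both the read and the
 * write go through any registered access handlers.
 *
 *      uint32_t u32 = PGMR3PhysReadDword(pVM, GCPhys);
 *      u32 |= 1;
 *      PGMR3PhysWriteDword(pVM, GCPhys, u32);
 */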
1783
1784/**
1785 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
1786 *
1787 * @returns see pgmR3PhysChunkMap.
1788 * @param pVM The VM handle.
1789 * @param idChunk The chunk to map.
1790 */
1791PDMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk);
1792
1793/**
1794 * Invalidates the TLB for the ring-3 mapping cache.
1795 *
1796 * @param pVM The VM handle.
1797 */
1798PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM);
1799
1800/**
1801 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
1802 *
1803 * @returns VBox status code.
1804 * @retval VINF_SUCCESS on success. FF cleared.
1805 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
1806 *
1807 * @param pVM The VM handle.
1808 */
1809PDMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM);
1810
1811/**
1812 * Perform an integrity check on the PGM component.
1813 *
1814 * @returns VINF_SUCCESS if everything is fine.
1815 * @returns VBox error status after asserting on integrity breach.
1816 * @param pVM The VM handle.
1817 */
1818PDMR3DECL(int) PGMR3CheckIntegrity(PVM pVM);
1819
1820/**
1821 * Converts a HC pointer to a GC physical address.
1822 *
1823 * Only for the debugger.
1824 *
1825 * @returns VBox status code.
1826 * @retval VINF_SUCCESS on success, *pGCPhys is set.
1827 * @retval VERR_INVALID_POINTER if the pointer is not within the GC physical memory.
1828 *
1829 * @param pVM The VM handle.
1830 * @param HCPtr The HC pointer to convert.
1831 * @param pGCPhys Where to store the GC physical address on success.
1832 */
1833PGMR3DECL(int) PGMR3DbgHCPtr2GCPhys(PVM pVM, RTHCPTR HCPtr, PRTGCPHYS pGCPhys);
1834
1835/**
1836 * Converts a HC pointer to a HC physical address.
1837 *
1838 * @returns VBox status code.
1839 * @retval VINF_SUCCESS on success, *pHCPhys is set.
1840 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical page but has no physical backing.
1841 * @retval VERR_INVALID_POINTER if the pointer is not within the GC physical memory.
1842 *
1843 * @param pVM The VM handle.
1844 * @param HCPtr The HC pointer to convert.
1845 * @param pHCPhys Where to store the HC physical address on success.
1846 */
1847PGMR3DECL(int) PGMR3DbgHCPtr2HCPhys(PVM pVM, RTHCPTR HCPtr, PRTHCPHYS pHCPhys);
1848
1849/**
1850 * Converts a HC physical address to a GC physical address.
1851 *
1852 * Only for the debugger.
1853 *
1854 * @returns VBox status code
1855 * @retval VINF_SUCCESS on success, *pGCPhys is set.
1856 * @retval VERR_INVALID_POINTER if the HC physical address is not within the GC physical memory.
1857 *
1858 * @param pVM The VM handle.
1859 * @param HCPhys The HC physical address to convert.
1860 * @param pGCPhys Where to store the GC physical address on success.
1861 */
1862PGMR3DECL(int) PGMR3DbgHCPhys2GCPhys(PVM pVM, RTHCPHYS HCPhys, PRTGCPHYS pGCPhys);
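
/* Illustrative sketch (not from the original sources): a debugger helper
 * translating a host pointer to the guest physical address it backs, going
 * via the host physical address; the variables are placeholders.
 *
 *      RTHCPHYS HCPhys;
 *      RTGCPHYS GCPhys;
 *      rc = PGMR3DbgHCPtr2HCPhys(pVM, HCPtr, &HCPhys);
 *      if (rc == VINF_SUCCESS)
 *          rc = PGMR3DbgHCPhys2GCPhys(pVM, HCPhys, &GCPhys);
 */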
1863
1864/** @} */
1865
1866#endif /* IN_RING3 */
1867
1868__END_DECLS
1869
1870/** @} */
1871#endif
1872