VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 26163

Last change on this file since 26163 was 25732, checked in by vboxsync, 15 years ago

PDMCritSect: Deployed lock ordering. (ring-3 only, only DEBUG_bird atm)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 165.5 KB
Line 
1/* $Id: VBoxRecompiler.c 25732 2010-01-11 16:23:26Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
79/** Copy 80-bit fpu register at pSrc to pDst.
80 * This is probably faster than *calling* memcpy.
81 */
82#define REM_COPY_FPU_REG(pDst, pSrc) \
83 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
116/** @todo Move stats to REM::s some rainy day we have nothing do to. */
117#ifdef VBOX_WITH_STATISTICS
118static STAMPROFILEADV gStatExecuteSingleInstr;
119static STAMPROFILEADV gStatCompilationQEmu;
120static STAMPROFILEADV gStatRunCodeQEmu;
121static STAMPROFILEADV gStatTotalTimeQEmu;
122static STAMPROFILEADV gStatTimers;
123static STAMPROFILEADV gStatTBLookup;
124static STAMPROFILEADV gStatIRQ;
125static STAMPROFILEADV gStatRawCheck;
126static STAMPROFILEADV gStatMemRead;
127static STAMPROFILEADV gStatMemWrite;
128static STAMPROFILE gStatGCPhys2HCVirt;
129static STAMPROFILE gStatHCVirt2GCPhys;
130static STAMCOUNTER gStatCpuGetTSC;
131static STAMCOUNTER gStatRefuseTFInhibit;
132static STAMCOUNTER gStatRefuseVM86;
133static STAMCOUNTER gStatRefusePaging;
134static STAMCOUNTER gStatRefusePAE;
135static STAMCOUNTER gStatRefuseIOPLNot0;
136static STAMCOUNTER gStatRefuseIF0;
137static STAMCOUNTER gStatRefuseCode16;
138static STAMCOUNTER gStatRefuseWP0;
139static STAMCOUNTER gStatRefuseRing1or2;
140static STAMCOUNTER gStatRefuseCanExecute;
141static STAMCOUNTER gStatREMGDTChange;
142static STAMCOUNTER gStatREMIDTChange;
143static STAMCOUNTER gStatREMLDTRChange;
144static STAMCOUNTER gStatREMTRChange;
145static STAMCOUNTER gStatSelOutOfSync[6];
146static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
147static STAMCOUNTER gStatFlushTBs;
148#endif
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
158/** MMIO read callbacks.
 * Entries are the 8-, 16- and 32-bit read accessors, in that order,
 * handed to cpu_register_io_memory() in REMR3Init. */
159CPUReadMemoryFunc *g_apfnMMIORead[3] =
160{
161 remR3MMIOReadU8,
162 remR3MMIOReadU16,
163 remR3MMIOReadU32
164};
165
166/** MMIO write callbacks.
 * Entries are the 8-, 16- and 32-bit write accessors, in that order. */
167CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
168{
169 remR3MMIOWriteU8,
170 remR3MMIOWriteU16,
171 remR3MMIOWriteU32
172};
173
174/** Handler read callbacks.
 * Same 8/16/32-bit layout as the MMIO tables; used for the access
 * handler memory type registered in REMR3Init. */
175CPUReadMemoryFunc *g_apfnHandlerRead[3] =
176{
177 remR3HandlerReadU8,
178 remR3HandlerReadU16,
179 remR3HandlerReadU32
180};
181
182/** Handler write callbacks.
 * Same 8/16/32-bit layout as the MMIO tables. */
183CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
184{
185 remR3HandlerWriteU8,
186 remR3HandlerWriteU16,
187 remR3HandlerWriteU32
188};
189
190
191#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
192/*
193 * Debugger commands.
194 */
195static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
196
197/** '.remstep' arguments. */
198static const DBGCVARDESC g_aArgRemStep[] =
199{
200 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
201 { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
202};
203
204/** Command descriptors. */
205static const DBGCCMD g_aCmds[] =
206{
207 {
208 .pszCmd ="remstep",
209 .cArgsMin = 0,
210 .cArgsMax = 1,
211 .paArgDescs = &g_aArgRemStep[0],
212 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
213 .pResultDesc = NULL,
214 .fFlags = 0,
215 .pfnHandler = remR3CmdDisasEnableStepping,
216 .pszSyntax = "[on/off]",
217 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
218 "If no arguments show the current state."
219 }
220};
221#endif
222
223/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
224uint8_t *code_gen_prologue;
225
226
227/*******************************************************************************
228* Internal Functions *
229*******************************************************************************/
230void remAbort(int rc, const char *pszTip);
231extern int testmath(void);
232
233/* Put them here to avoid unused variable warning. */
234AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
235#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
236//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
237/* Why did this have to be identical?? */
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#else
240AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
241#endif
242
243
244/**
245 * Initializes the REM.
246 *
 * Brings up the embedded recompiler: validates structure layout, creates
 * the REM registration critical section, allocates the prologue and
 * single-instruction code buffers, initializes the emulated CPU state,
 * registers the MMIO/handler memory types, the saved state unit, the
 * debugger commands, the statistics, and the handler notification lists.
 *
247 * @returns VBox status code.
248 * @param pVM The VM to operate on.
249 */
250REMR3DECL(int) REMR3Init(PVM pVM)
251{
252 PREMHANDLERNOTIFICATION pCur;
253 uint32_t u32Dummy;
254 int rc;
255 unsigned i;
256
257#ifdef VBOX_ENABLE_VBOXREM64
258 LogRel(("Using 64-bit aware REM\n"));
259#endif
260
261 /*
262 * Assert sanity.
263 */
 /* The padding member must fully cover the REM state, the Env must fit in
    its reserved space, and VM::rem must stay 32-byte aligned. */
264 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
265 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
266 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
267#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
268 Assert(!testmath());
269#endif
270
271 /*
272 * Init some internal data members.
273 */
274 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
275 pVM->rem.s.Env.pVM = pVM;
276#ifdef CPU_RAW_MODE_INIT
277 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
278#endif
279
280 /*
281 * Initialize the REM critical section.
282 *
283 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
284 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
285 * deadlocks. (mostly pgm vs rem locking)
286 */
287 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
288 AssertRCReturn(rc, rc);
289
290 /* ctx. */
291 pVM->rem.s.pCtx = NULL; /* set when executing code. */
292 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
293
294 /* ignore all notifications */
 /* cIgnoreAll > 0 suppresses physical memory change notifications while we
    bootstrap the recompiler; balanced by the decrement further down. */
295 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
296
 /* Per the code_gen_prologue declaration above: must be allocated in the
    lower 4GB to keep jumps to/from generated code simple. */
297 code_gen_prologue = RTMemExecAlloc(_1K);
298 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
299
300 cpu_exec_init_all(0);
301
302 /*
303 * Init the recompiler.
304 */
305 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
306 {
307 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
308 return VERR_GENERAL_FAILURE;
309 }
 /* Mirror the guest CPUID feature bits into the emulated CPU state. */
310 PVMCPU pVCpu = VMMGetCpu(pVM);
311 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
312 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
313
314 /* allocate code buffer for single instruction emulation. */
315 pVM->rem.s.Env.cbCodeBuffer = 4096;
316 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
317 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
318
319 /* finally, set the cpu_single_env global. */
320 cpu_single_env = &pVM->rem.s.Env;
321
322 /* Nothing is pending by default */
323 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
324
325 /*
326 * Register ram types.
327 */
328 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
329 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
330 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
331 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
332 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
333
334 /* stop ignoring. */
335 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
336
337 /*
338 * Register the saved state data unit.
339 */
340 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
341 NULL, NULL, NULL,
342 NULL, remR3Save, NULL,
343 NULL, remR3Load, NULL);
344 if (RT_FAILURE(rc))
345 return rc;
346
347#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
348 /*
349 * Debugger commands.
350 */
 /* Registered once per process, not per VM, hence the static flag. */
351 static bool fRegisteredCmds = false;
352 if (!fRegisteredCmds)
353 {
 /* NOTE(review): this inner 'rc' shadows the outer one; harmless because a
    registration failure is intentionally non-fatal, but worth confirming. */
354 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
355 if (RT_SUCCESS(rc))
356 fRegisteredCmds = true;
357 }
358#endif
359
360#ifdef VBOX_WITH_STATISTICS
361 /*
362 * Statistics.
363 */
364 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
365 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
366 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
367 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
368 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
369 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
370 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
371 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
372 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
373 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
374 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
375 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
376
377 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
378
379 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
380 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
381 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
382 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
383 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
384 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
385 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
386 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
387 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
388 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
389 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
390
391 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
392 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
393 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
394 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
395
396 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
397 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
398 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
399 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
400 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
401 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
402
403 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
404 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
405 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
406 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
407 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
408 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
409
410 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
411#endif /* VBOX_WITH_STATISTICS */
412
413 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
414 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
415 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
416
417
418#ifdef DEBUG_ALL_LOGGING
419 loglevel = ~0;
420# ifdef DEBUG_TMP_LOGGING
421 logfile = fopen("/tmp/vbox-qemu.log", "w");
422# endif
423#endif
424
425 /*
426 * Init the handler notification lists.
427 */
 /* Build the free list: each entry links to the next, the last one is
    terminated below with UINT32_MAX. */
428 pVM->rem.s.idxPendingList = UINT32_MAX;
429 pVM->rem.s.idxFreeList = 0;
430
431 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
432 {
433 pCur = &pVM->rem.s.aHandlerNotifications[i];
434 pCur->idxNext = i + 1;
435 pCur->idxSelf = i;
436 }
437 pCur->idxNext = UINT32_MAX; /* the last record. */
438
 /* rc still holds the (successful) SSMR3RegisterInternal status here. */
439 return rc;
440}
441
442
443/**
444 * Finalizes the REM initialization.
445 *
446 * This is called after all components, devices and drivers has
447 * been initialized. Its main purpose it to finish the RAM related
448 * initialization.
449 *
450 * @returns VBox status code.
451 *
452 * @param pVM The VM handle.
453 */
454REMR3DECL(int) REMR3InitFinalize(PVM pVM)
455{
456 int rc;
457
458 /*
459 * Ram size & dirty bit map.
460 */
461 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
462 pVM->rem.s.fGCPhysLastRamFixed = true;
463#ifdef RT_STRICT
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
465#else
466 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
467#endif
468 return rc;
469}
470
471
472/**
473 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
474 *
 * The dirty map has one byte per guest page. When fGuarded is set, the map
 * is page-allocated with an inaccessible guard region after it so that
 * overruns fault immediately instead of corrupting memory.
 *
475 * @returns VBox status code.
476 * @param pVM The VM handle.
477 * @param fGuarded Whether to guard the map.
478 */
479static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
480{
481 int rc = VINF_SUCCESS;
482 RTGCPHYS cb;
483
 /* GCPhysLastRam is the last RAM address, so +1 gives the size; the assert
    below catches wrap-around (GCPhysLastRam == ~0). */
484 cb = pVM->rem.s.GCPhysLastRam + 1;
485 AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
486 ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
487 VERR_OUT_OF_RANGE);
488 phys_ram_size = cb;
489 phys_ram_dirty_size = cb >> PAGE_SHIFT;
 /* Verify the size was page aligned (no bits lost by the shift). */
490 AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
491
492 if (!fGuarded)
493 {
494 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
495 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
496 }
497 else
498 {
499 /*
500 * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
501 */
502 uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
503 uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
504 if (cbBitmapFull == cbBitmapAligned)
505 cbBitmapFull += _4G >> PAGE_SHIFT;
506 else if (cbBitmapFull - cbBitmapAligned < _64K)
507 cbBitmapFull += _64K;
508
509 phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
510 AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
511
 /* Make everything past the aligned bitmap inaccessible (the guard). */
512 rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
513 if (RT_FAILURE(rc))
514 {
515 RTMemPageFree(phys_ram_dirty);
516 AssertLogRelRCReturn(rc, rc);
517 }
518
 /* Shift the pointer forward so the *end* of the in-use bitmap abuts the
    guard region, catching off-the-end writes as early as possible. */
519 phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
520 }
521
522 /* initialize it. */
 /* All-ones == every page dirty, forcing initial synchronization. */
523 memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
524 return rc;
525}
526
527
528/**
529 * Terminates the REM.
530 *
531 * Termination means cleaning up and freeing all resources,
532 * the VM it self is at this point powered off or suspended.
533 *
 * Currently only deregisters the statistics samples; the recompiler code
 * buffers and the critical section are left to VM teardown.
 * NOTE(review): code_gen_prologue / Env.pvCodeBuffer are not freed here —
 * confirm they are reclaimed elsewhere.
 *
534 * @returns VBox status code.
535 * @param pVM The VM to operate on.
536 */
537REMR3DECL(int) REMR3Term(PVM pVM)
538{
539#ifdef VBOX_WITH_STATISTICS
540 /*
541 * Statistics.
542 */
 /* Deregister everything REMR3Init registered, in the same groups. */
543 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
544 STAM_DEREG(pVM, &gStatCompilationQEmu);
545 STAM_DEREG(pVM, &gStatRunCodeQEmu);
546 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
547 STAM_DEREG(pVM, &gStatTimers);
548 STAM_DEREG(pVM, &gStatTBLookup);
549 STAM_DEREG(pVM, &gStatIRQ);
550 STAM_DEREG(pVM, &gStatRawCheck);
551 STAM_DEREG(pVM, &gStatMemRead);
552 STAM_DEREG(pVM, &gStatMemWrite);
553 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
554 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
555
556 STAM_DEREG(pVM, &gStatCpuGetTSC);
557
558 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
559 STAM_DEREG(pVM, &gStatRefuseVM86);
560 STAM_DEREG(pVM, &gStatRefusePaging);
561 STAM_DEREG(pVM, &gStatRefusePAE);
562 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
563 STAM_DEREG(pVM, &gStatRefuseIF0);
564 STAM_DEREG(pVM, &gStatRefuseCode16);
565 STAM_DEREG(pVM, &gStatRefuseWP0);
566 STAM_DEREG(pVM, &gStatRefuseRing1or2);
567 STAM_DEREG(pVM, &gStatRefuseCanExecute);
568 STAM_DEREG(pVM, &gStatFlushTBs);
569
570 STAM_DEREG(pVM, &gStatREMGDTChange);
571 STAM_DEREG(pVM, &gStatREMLDTRChange);
572 STAM_DEREG(pVM, &gStatREMIDTChange);
573 STAM_DEREG(pVM, &gStatREMTRChange);
574
575 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
576 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
577 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
578 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
579 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
580 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
581
582 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
583 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
584 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
585 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
586 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
587 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
588
589 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
590#endif /* VBOX_WITH_STATISTICS */
591
592 STAM_REL_DEREG(pVM, &tb_flush_count);
593 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
594 STAM_REL_DEREG(pVM, &tlb_flush_count);
595
596 return VINF_SUCCESS;
597}
598
599
600/**
601 * The VM is being reset.
602 *
603 * For the REM component this means to call the cpu_reset() and
604 * reinitialize some state variables.
605 *
606 * @param pVM VM handle.
607 */
608REMR3DECL(void) REMR3Reset(PVM pVM)
609{
610 /*
611 * Reset the REM cpu.
612 */
 /* Bump cIgnoreAll around cpu_reset() so notifications triggered by the
    reset are ignored; the asserts check the counter is balanced. */
613 Assert(pVM->rem.s.cIgnoreAll == 0);
614 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
615 cpu_reset(&pVM->rem.s.Env);
616 pVM->rem.s.cInvalidatedPages = 0;
617 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
618 Assert(pVM->rem.s.cIgnoreAll == 0);
619
620 /* Clear raw ring 0 init state */
621 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
622
623 /* Flush the TBs the next time we execute code here. */
 /* All translated blocks may reference pre-reset state, so they must go. */
624 pVM->rem.s.fFlushTBs = true;
625}
626
627
628/**
629 * Execute state save operation.
630 *
 * Stream layout (must stay in sync with remR3Load):
 *   hflags, ~0 separator, raw-ring-0 flag, pending interrupt, ~0 terminator.
 *
631 * @returns VBox status code.
632 * @param pVM VM Handle.
633 * @param pSSM SSM operation handle.
634 */
635static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
636{
637 PREM pRem = &pVM->rem.s;
638
639 /*
640 * Save the required CPU Env bits.
641 * (Not much because we're never in REM when doing the save.)
642 */
643 LogFlow(("remR3Save:\n"));
644 Assert(!pRem->fInREM);
645 SSMR3PutU32(pSSM, pRem->Env.hflags);
646 SSMR3PutU32(pSSM, ~0); /* separator */
647
648 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
 /* '!!' normalizes the flag bit to 0/1 before storing it as a U32. */
649 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
650 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
651
 /* Only the final Put's status is returned; earlier failures repeat here
    because SSM calls keep failing once the stream has an error. */
652 return SSMR3PutU32(pSSM, ~0); /* terminator */
653}
654
655
656/**
657 * Execute state load operation.
658 *
 * Parses the stream written by remR3Save (plus extra, now ignored, fields
 * for the 1.6 version), resets the REM first and flags all CPU state as
 * changed so it gets resynced before the recompiler runs again.
 *
659 * @returns VBox status code.
660 * @param pVM VM Handle.
661 * @param pSSM SSM operation handle.
662 * @param uVersion Data layout version.
663 * @param uPass The data pass.
664 */
665static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
666{
667 uint32_t u32Dummy;
668 uint32_t fRawRing0 = false;
669 uint32_t u32Sep;
670 uint32_t i;
671 int rc;
672 PREM pRem;
673
674 LogFlow(("remR3Load:\n"));
675 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
676
677 /*
678 * Validate version.
679 */
680 if ( uVersion != REM_SAVED_STATE_VERSION
681 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
682 {
683 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
684 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
685 }
686
687 /*
688 * Do a reset to be on the safe side...
689 */
690 REMR3Reset(pVM);
691
692 /*
693 * Ignore all ignorable notifications.
694 * (Not doing this will cause serious trouble.)
695 */
696 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
697
698 /*
699 * Load the required CPU Env bits.
700 * (Not much because we're never in REM when doing the save.)
701 */
702 pRem = &pVM->rem.s;
703 Assert(!pRem->fInREM);
704 SSMR3GetU32(pSSM, &pRem->Env.hflags);
705 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
706 {
707 /* Redundant REM CPU state has to be loaded, but can be ignored. */
708 CPUX86State_Ver16 temp;
 /* Read into a scratch struct purely to advance the stream position. */
709 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
710 }
711
712 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
713 if (RT_FAILURE(rc))
714 return rc;
715 if (u32Sep != ~0U)
716 {
717 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
718 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
719 }
720
721 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
722 SSMR3GetUInt(pSSM, &fRawRing0);
723 if (fRawRing0)
724 pRem->Env.state |= CPU_RAW_RING0;
725
726 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
727 {
728 /*
729 * Load the REM stuff.
730 */
731 /** @todo r=bird: We should just drop all these items, restoring doesn't make
732 * sense. */
733 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
734 if (RT_FAILURE(rc))
735 return rc;
736 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
737 {
738 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
739 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
740 }
741 for (i = 0; i < pRem->cInvalidatedPages; i++)
742 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
743 }
744
745 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
746 if (RT_FAILURE(rc))
747 return rc;
748
749 /* check the terminator. */
750 rc = SSMR3GetU32(pSSM, &u32Sep);
751 if (RT_FAILURE(rc))
752 return rc;
753 if (u32Sep != ~0U)
754 {
755 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
756 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
757 }
758
759 /*
760 * Get the CPUID features.
761 */
762 PVMCPU pVCpu = VMMGetCpu(pVM);
763 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
764 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
765
766 /*
767 * Sync the Load Flush the TLB
768 */
769 tlb_flush(&pRem->Env, 1);
770
771 /*
772 * Stop ignoring ignornable notifications.
773 */
774 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
775
776 /*
777 * Sync the whole CPU state when executing code in the recompiler.
778 */
779 for (i = 0; i < pVM->cCpus; i++)
780 {
 /* NOTE(review): this shadows the pVCpu declared above; intentional per-CPU
    iteration, but the duplicate name is worth cleaning up. */
781 PVMCPU pVCpu = &pVM->aCpus[i];
782 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
783 }
784 return VINF_SUCCESS;
785}
786
787
788
789#undef LOG_GROUP
790#define LOG_GROUP LOG_GROUP_REM_RUN
791
792/**
793 * Single steps an instruction in recompiled mode.
794 *
795 * Before calling this function the REM state needs to be in sync with
796 * the VM. Call REMR3State() to perform the sync. It's only necessary
797 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
798 * and after calling REMR3StateBack().
799 *
800 * @returns VBox status code.
801 *
802 * @param pVM VM Handle.
803 * @param pVCpu VMCPU Handle.
804 */
805REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
806{
807 int rc, interrupt_request;
808 RTGCPTR GCPtrPC;
809 bool fBp;
810
811 /*
812 * Lock the REM - we don't wanna have anyone interrupting us
813 * while stepping - and enabled single stepping. We also ignore
814 * pending interrupts and suchlike.
815 */
 /* Save the request mask so it can be restored verbatim afterwards. */
816 interrupt_request = pVM->rem.s.Env.interrupt_request;
817 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
818 pVM->rem.s.Env.interrupt_request = 0;
819 cpu_single_step(&pVM->rem.s.Env, 1);
820
821 /*
822 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
823 */
 /* Flat PC = EIP + CS base; cpu_breakpoint_remove returns 0 when a
    breakpoint was actually removed, hence the negation. */
824 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
825 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);
826
827 /*
828 * Execute and handle the return code.
829 * We execute without enabling the cpu tick, so on success we'll
830 * just flip it on and off to make sure it moves
831 */
832 rc = cpu_exec(&pVM->rem.s.Env);
833 if (rc == EXCP_DEBUG)
834 {
 /* The resume/suspend pair nudges the TM clock forward (see comment above). */
835 TMR3NotifyResume(pVM, pVCpu);
836 TMR3NotifySuspend(pVM, pVCpu);
837 rc = VINF_EM_DBG_STEPPED;
838 }
839 else
840 {
841 switch (rc)
842 {
843 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
844 case EXCP_HLT:
845 case EXCP_HALTED: rc = VINF_EM_HALT; break;
846 case EXCP_RC:
 /* A VBox status was parked in rem.s.rc; fetch and clear it. */
847 rc = pVM->rem.s.rc;
848 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
849 break;
850 case EXCP_EXECUTE_RAW:
851 case EXCP_EXECUTE_HWACC:
852 /** @todo: is it correct? No! */
853 rc = VINF_SUCCESS;
854 break;
855 default:
856 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
857 rc = VERR_INTERNAL_ERROR;
858 break;
859 }
860 }
861
862 /*
863 * Restore the stuff we changed to prevent interruption.
864 * Unlock the REM.
865 */
 /* Re-arm the breakpoint we removed above, if any. */
866 if (fBp)
867 {
868 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
869 Assert(rc2 == 0); NOREF(rc2);
870 }
871 cpu_single_step(&pVM->rem.s.Env, 0);
872 pVM->rem.s.Env.interrupt_request = interrupt_request;
873
874 return rc;
875}
876
877
878/**
879 * Set a breakpoint using the REM facilities.
880 *
881 * @returns VBox status code.
882 * @param pVM The VM handle.
883 * @param Address The breakpoint address.
884 * @thread The emulation thread.
885 */
886REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
887{
888 VM_ASSERT_EMT(pVM);
889 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
890 {
891 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
892 return VINF_SUCCESS;
893 }
894 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
895 return VERR_REM_NO_MORE_BP_SLOTS;
896}
897
898
899/**
900 * Clears a breakpoint set by REMR3BreakpointSet().
901 *
902 * @returns VBox status code.
903 * @param pVM The VM handle.
904 * @param Address The breakpoint address.
905 * @thread The emulation thread.
906 */
907REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
908{
909 VM_ASSERT_EMT(pVM);
910 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
911 {
912 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
913 return VINF_SUCCESS;
914 }
915 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
916 return VERR_REM_BP_NOT_FOUND;
917}
918
919
920/**
921 * Emulate an instruction.
922 *
923 * This function executes one instruction without letting anyone
924 * interrupt it. This is intended for being called while being in
925 * raw mode and thus will take care of all the state syncing between
926 * REM and the rest.
927 *
928 * @returns VBox status code.
929 * @param pVM VM handle.
930 * @param pVCpu VMCPU Handle.
931 */
932REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
933{
934 bool fFlushTBs;
935
936 int rc, rc2;
937 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
938
939 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
940 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
941 */
942 if (HWACCMIsEnabled(pVM))
943 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
944
945 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
946 fFlushTBs = pVM->rem.s.fFlushTBs;
947 pVM->rem.s.fFlushTBs = false;
948
949 /*
950 * Sync the state and enable single instruction / single stepping.
951 */
952 rc = REMR3State(pVM, pVCpu);
953 pVM->rem.s.fFlushTBs = fFlushTBs;
954 if (RT_SUCCESS(rc))
955 {
956 int interrupt_request = pVM->rem.s.Env.interrupt_request;
957 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
958 Assert(!pVM->rem.s.Env.singlestep_enabled);
959 /*
960 * Now we set the execute single instruction flag and enter the cpu_exec loop.
961 */
962 TMNotifyStartOfExecution(pVCpu);
963 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
964 rc = cpu_exec(&pVM->rem.s.Env);
965 TMNotifyEndOfExecution(pVCpu);
966 switch (rc)
967 {
968 /*
969 * Executed without anything out of the way happening.
970 */
971 case EXCP_SINGLE_INSTR:
972 rc = VINF_EM_RESCHEDULE;
973 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
974 break;
975
976 /*
977 * If we take a trap or start servicing a pending interrupt, we might end up here.
978 * (Timer thread or some other thread wishing EMT's attention.)
979 */
980 case EXCP_INTERRUPT:
981 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
982 rc = VINF_EM_RESCHEDULE;
983 break;
984
985 /*
986 * Single step, we assume!
987 * If there was a breakpoint there we're fucked now.
988 */
989 case EXCP_DEBUG:
990 {
991 /* breakpoint or single step? */
992 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
993 int iBP;
994 rc = VINF_EM_DBG_STEPPED;
995 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
996 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
997 {
998 rc = VINF_EM_DBG_BREAKPOINT;
999 break;
1000 }
1001 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1002 break;
1003 }
1004
1005 /*
1006 * hlt instruction.
1007 */
1008 case EXCP_HLT:
1009 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1010 rc = VINF_EM_HALT;
1011 break;
1012
1013 /*
1014 * The VM has halted.
1015 */
1016 case EXCP_HALTED:
1017 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1018 rc = VINF_EM_HALT;
1019 break;
1020
1021 /*
1022 * Switch to RAW-mode.
1023 */
1024 case EXCP_EXECUTE_RAW:
1025 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1026 rc = VINF_EM_RESCHEDULE_RAW;
1027 break;
1028
1029 /*
1030 * Switch to hardware accelerated RAW-mode.
1031 */
1032 case EXCP_EXECUTE_HWACC:
1033 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1034 rc = VINF_EM_RESCHEDULE_HWACC;
1035 break;
1036
1037 /*
1038 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1039 */
1040 case EXCP_RC:
1041 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1042 rc = pVM->rem.s.rc;
1043 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1044 break;
1045
1046 /*
1047 * Figure out the rest when they arrive....
1048 */
1049 default:
1050 AssertMsgFailed(("rc=%d\n", rc));
1051 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1052 rc = VINF_EM_RESCHEDULE;
1053 break;
1054 }
1055
1056 /*
1057 * Switch back the state.
1058 */
1059 pVM->rem.s.Env.interrupt_request = interrupt_request;
1060 rc2 = REMR3StateBack(pVM, pVCpu);
1061 AssertRC(rc2);
1062 }
1063
1064 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1065 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1066 return rc;
1067}
1068
1069
1070/**
1071 * Runs code in recompiled mode.
1072 *
1073 * Before calling this function the REM state needs to be in sync with
1074 * the VM. Call REMR3State() to perform the sync. It's only necessary
1075 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1076 * and after calling REMR3StateBack().
1077 *
1078 * @returns VBox status code.
1079 *
1080 * @param pVM VM Handle.
1081 * @param pVCpu VMCPU Handle.
1082 */
1083REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1084{
1085 int rc;
1086 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1087 Assert(pVM->rem.s.fInREM);
1088
1089 TMNotifyStartOfExecution(pVCpu);
1090 rc = cpu_exec(&pVM->rem.s.Env);
1091 TMNotifyEndOfExecution(pVCpu);
1092 switch (rc)
1093 {
1094 /*
1095 * This happens when the execution was interrupted
1096 * by an external event, like pending timers.
1097 */
1098 case EXCP_INTERRUPT:
1099 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1100 rc = VINF_SUCCESS;
1101 break;
1102
1103 /*
1104 * hlt instruction.
1105 */
1106 case EXCP_HLT:
1107 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1108 rc = VINF_EM_HALT;
1109 break;
1110
1111 /*
1112 * The VM has halted.
1113 */
1114 case EXCP_HALTED:
1115 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1116 rc = VINF_EM_HALT;
1117 break;
1118
1119 /*
1120 * Breakpoint/single step.
1121 */
1122 case EXCP_DEBUG:
1123 {
1124#if 0//def DEBUG_bird
1125 static int iBP = 0;
1126 printf("howdy, breakpoint! iBP=%d\n", iBP);
1127 switch (iBP)
1128 {
1129 case 0:
1130 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1131 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1132 //pVM->rem.s.Env.interrupt_request = 0;
1133 //pVM->rem.s.Env.exception_index = -1;
1134 //g_fInterruptDisabled = 1;
1135 rc = VINF_SUCCESS;
1136 asm("int3");
1137 break;
1138 default:
1139 asm("int3");
1140 break;
1141 }
1142 iBP++;
1143#else
1144 /* breakpoint or single step? */
1145 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1146 int iBP;
1147 rc = VINF_EM_DBG_STEPPED;
1148 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1149 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1150 {
1151 rc = VINF_EM_DBG_BREAKPOINT;
1152 break;
1153 }
1154 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1155#endif
1156 break;
1157 }
1158
1159 /*
1160 * Switch to RAW-mode.
1161 */
1162 case EXCP_EXECUTE_RAW:
1163 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1164 rc = VINF_EM_RESCHEDULE_RAW;
1165 break;
1166
1167 /*
1168 * Switch to hardware accelerated RAW-mode.
1169 */
1170 case EXCP_EXECUTE_HWACC:
1171 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1172 rc = VINF_EM_RESCHEDULE_HWACC;
1173 break;
1174
1175 /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
1176 /*
1177 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1178 */
1179 case EXCP_RC:
1180 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1181 rc = pVM->rem.s.rc;
1182 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1183 break;
1184
1185 /*
1186 * Figure out the rest when they arrive....
1187 */
1188 default:
1189 AssertMsgFailed(("rc=%d\n", rc));
1190 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1191 rc = VINF_SUCCESS;
1192 break;
1193 }
1194
1195 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1196 return rc;
1197}
1198
1199
1200/**
1201 * Check if the cpu state is suitable for Raw execution.
1202 *
1203 * @returns boolean
1204 * @param env The CPU env struct.
1205 * @param eip The EIP to check this for (might differ from env->eip).
1206 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1207 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1208 *
1209 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1210 */
1211bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1212{
1213 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1214 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1215 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1216 uint32_t u32CR0;
1217
1218 /* Update counter. */
1219 env->pVM->rem.s.cCanExecuteRaw++;
1220
1221 if (HWACCMIsEnabled(env->pVM))
1222 {
1223 CPUMCTX Ctx;
1224
1225 env->state |= CPU_RAW_HWACC;
1226
1227 /*
1228 * Create partial context for HWACCMR3CanExecuteGuest
1229 */
1230 Ctx.cr0 = env->cr[0];
1231 Ctx.cr3 = env->cr[3];
1232 Ctx.cr4 = env->cr[4];
1233
1234 Ctx.tr = env->tr.selector;
1235 Ctx.trHid.u64Base = env->tr.base;
1236 Ctx.trHid.u32Limit = env->tr.limit;
1237 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1238
1239 Ctx.idtr.cbIdt = env->idt.limit;
1240 Ctx.idtr.pIdt = env->idt.base;
1241
1242 Ctx.gdtr.cbGdt = env->gdt.limit;
1243 Ctx.gdtr.pGdt = env->gdt.base;
1244
1245 Ctx.rsp = env->regs[R_ESP];
1246 Ctx.rip = env->eip;
1247
1248 Ctx.eflags.u32 = env->eflags;
1249
1250 Ctx.cs = env->segs[R_CS].selector;
1251 Ctx.csHid.u64Base = env->segs[R_CS].base;
1252 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1253 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1254
1255 Ctx.ds = env->segs[R_DS].selector;
1256 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1257 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1258 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1259
1260 Ctx.es = env->segs[R_ES].selector;
1261 Ctx.esHid.u64Base = env->segs[R_ES].base;
1262 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1263 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1264
1265 Ctx.fs = env->segs[R_FS].selector;
1266 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1267 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1268 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1269
1270 Ctx.gs = env->segs[R_GS].selector;
1271 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1272 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1273 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1274
1275 Ctx.ss = env->segs[R_SS].selector;
1276 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1277 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1278 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1279
1280 Ctx.msrEFER = env->efer;
1281
1282 /* Hardware accelerated raw-mode:
1283 *
1284 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1285 */
1286 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1287 {
1288 *piException = EXCP_EXECUTE_HWACC;
1289 return true;
1290 }
1291 return false;
1292 }
1293
1294 /*
1295 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1296 * or 32 bits protected mode ring 0 code
1297 *
1298 * The tests are ordered by the likelyhood of being true during normal execution.
1299 */
1300 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1301 {
1302 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1303 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1304 return false;
1305 }
1306
1307#ifndef VBOX_RAW_V86
1308 if (fFlags & VM_MASK) {
1309 STAM_COUNTER_INC(&gStatRefuseVM86);
1310 Log2(("raw mode refused: VM_MASK\n"));
1311 return false;
1312 }
1313#endif
1314
1315 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1316 {
1317#ifndef DEBUG_bird
1318 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1319#endif
1320 return false;
1321 }
1322
1323 if (env->singlestep_enabled)
1324 {
1325 //Log2(("raw mode refused: Single step\n"));
1326 return false;
1327 }
1328
1329 if (env->nb_breakpoints > 0)
1330 {
1331 //Log2(("raw mode refused: Breakpoints\n"));
1332 return false;
1333 }
1334
1335 u32CR0 = env->cr[0];
1336 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1337 {
1338 STAM_COUNTER_INC(&gStatRefusePaging);
1339 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1340 return false;
1341 }
1342
1343 if (env->cr[4] & CR4_PAE_MASK)
1344 {
1345 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1346 {
1347 STAM_COUNTER_INC(&gStatRefusePAE);
1348 return false;
1349 }
1350 }
1351
1352 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1353 {
1354 if (!EMIsRawRing3Enabled(env->pVM))
1355 return false;
1356
1357 if (!(env->eflags & IF_MASK))
1358 {
1359 STAM_COUNTER_INC(&gStatRefuseIF0);
1360 Log2(("raw mode refused: IF (RawR3)\n"));
1361 return false;
1362 }
1363
1364 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1365 {
1366 STAM_COUNTER_INC(&gStatRefuseWP0);
1367 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1368 return false;
1369 }
1370 }
1371 else
1372 {
1373 if (!EMIsRawRing0Enabled(env->pVM))
1374 return false;
1375
1376 // Let's start with pure 32 bits ring 0 code first
1377 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1378 {
1379 STAM_COUNTER_INC(&gStatRefuseCode16);
1380 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1381 return false;
1382 }
1383
1384 // Only R0
1385 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1386 {
1387 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1388 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1389 return false;
1390 }
1391
1392 if (!(u32CR0 & CR0_WP_MASK))
1393 {
1394 STAM_COUNTER_INC(&gStatRefuseWP0);
1395 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1396 return false;
1397 }
1398
1399 if (PATMIsPatchGCAddr(env->pVM, eip))
1400 {
1401 Log2(("raw r0 mode forced: patch code\n"));
1402 *piException = EXCP_EXECUTE_RAW;
1403 return true;
1404 }
1405
1406#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1407 if (!(env->eflags & IF_MASK))
1408 {
1409 STAM_COUNTER_INC(&gStatRefuseIF0);
1410 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1411 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1412 return false;
1413 }
1414#endif
1415
1416 env->state |= CPU_RAW_RING0;
1417 }
1418
1419 /*
1420 * Don't reschedule the first time we're called, because there might be
1421 * special reasons why we're here that is not covered by the above checks.
1422 */
1423 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1424 {
1425 Log2(("raw mode refused: first scheduling\n"));
1426 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1427 return false;
1428 }
1429
1430 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1431 *piException = EXCP_EXECUTE_RAW;
1432 return true;
1433}
1434
1435
1436/**
1437 * Fetches a code byte.
1438 *
1439 * @returns Success indicator (bool) for ease of use.
1440 * @param env The CPU environment structure.
1441 * @param GCPtrInstr Where to fetch code.
1442 * @param pu8Byte Where to store the byte on success
1443 */
1444bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1445{
1446 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1447 if (RT_SUCCESS(rc))
1448 return true;
1449 return false;
1450}
1451
1452
1453/**
1454 * Flush (or invalidate if you like) page table/dir entry.
1455 *
1456 * (invlpg instruction; tlb_flush_page)
1457 *
1458 * @param env Pointer to cpu environment.
1459 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1460 */
1461void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1462{
1463 PVM pVM = env->pVM;
1464 PCPUMCTX pCtx;
1465 int rc;
1466
1467 /*
1468 * When we're replaying invlpg instructions or restoring a saved
1469 * state we disable this path.
1470 */
1471 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1472 return;
1473 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1474 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1475
1476 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1477
1478 /*
1479 * Update the control registers before calling PGMFlushPage.
1480 */
1481 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1482 Assert(pCtx);
1483 pCtx->cr0 = env->cr[0];
1484 pCtx->cr3 = env->cr[3];
1485 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1486 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1487 pCtx->cr4 = env->cr[4];
1488
1489 /*
1490 * Let PGM do the rest.
1491 */
1492 Assert(env->pVCpu);
1493 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1494 if (RT_FAILURE(rc))
1495 {
1496 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1497 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1498 }
1499 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1500}
1501
1502
#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * The low two bits of the returned pointer encode access information:
 * bit 0 set (returns (void *)1) means the lookup failed (unassigned or
 * fully handled page); bit 1 set means the page is write-monitored.
 *
 * NOTE(review): the fWritable parameter is ignored — a hard-coded 'true'
 * is passed to PGMR3PhysTlbGCPhys2Ptr instead. Confirm this is intended.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;      /* failure marker (bit 0 set) */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);  /* write-monitored marker (bit 1 set) */
    return pv;
}
#endif /* !REM_PHYS_ADDR_IN_TLB */
1525
1526
1527/**
1528 * Called from tlb_protect_code in order to write monitor a code page.
1529 *
1530 * @param env Pointer to the CPU environment.
1531 * @param GCPtr Code page to monitor
1532 */
1533void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1534{
1535#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1536 Assert(env->pVM->rem.s.fInREM);
1537 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1538 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1539 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1540 && !(env->eflags & VM_MASK) /* no V86 mode */
1541 && !HWACCMIsEnabled(env->pVM))
1542 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1543#endif
1544}
1545
1546
1547/**
1548 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1549 *
1550 * @param env Pointer to the CPU environment.
1551 * @param GCPtr Code page to monitor
1552 */
1553void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1554{
1555 Assert(env->pVM->rem.s.fInREM);
1556#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1557 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1558 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1559 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1560 && !(env->eflags & VM_MASK) /* no V86 mode */
1561 && !HWACCMIsEnabled(env->pVM))
1562 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1563#endif
1564}
1565
1566
1567/**
1568 * Called when the CPU is initialized, any of the CRx registers are changed or
1569 * when the A20 line is modified.
1570 *
1571 * @param env Pointer to the CPU environment.
1572 * @param fGlobal Set if the flush is global.
1573 */
1574void remR3FlushTLB(CPUState *env, bool fGlobal)
1575{
1576 PVM pVM = env->pVM;
1577 PCPUMCTX pCtx;
1578
1579 /*
1580 * When we're replaying invlpg instructions or restoring a saved
1581 * state we disable this path.
1582 */
1583 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1584 return;
1585 Assert(pVM->rem.s.fInREM);
1586
1587 /*
1588 * The caller doesn't check cr4, so we have to do that for ourselves.
1589 */
1590 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1591 fGlobal = true;
1592 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1593
1594 /*
1595 * Update the control registers before calling PGMR3FlushTLB.
1596 */
1597 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1598 Assert(pCtx);
1599 pCtx->cr0 = env->cr[0];
1600 pCtx->cr3 = env->cr[3];
1601 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1602 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1603 pCtx->cr4 = env->cr[4];
1604
1605 /*
1606 * Let PGM do the rest.
1607 */
1608 Assert(env->pVCpu);
1609 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1610}
1611
1612
1613/**
1614 * Called when any of the cr0, cr4 or efer registers is updated.
1615 *
1616 * @param env Pointer to the CPU environment.
1617 */
1618void remR3ChangeCpuMode(CPUState *env)
1619{
1620 PVM pVM = env->pVM;
1621 uint64_t efer;
1622 PCPUMCTX pCtx;
1623 int rc;
1624
1625 /*
1626 * When we're replaying loads or restoring a saved
1627 * state this path is disabled.
1628 */
1629 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1630 return;
1631 Assert(pVM->rem.s.fInREM);
1632
1633 /*
1634 * Update the control registers before calling PGMChangeMode()
1635 * as it may need to map whatever cr3 is pointing to.
1636 */
1637 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1638 Assert(pCtx);
1639 pCtx->cr0 = env->cr[0];
1640 pCtx->cr3 = env->cr[3];
1641 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1642 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1643 pCtx->cr4 = env->cr[4];
1644
1645#ifdef TARGET_X86_64
1646 efer = env->efer;
1647#else
1648 efer = 0;
1649#endif
1650 Assert(env->pVCpu);
1651 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1652 if (rc != VINF_SUCCESS)
1653 {
1654 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1655 {
1656 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1657 remR3RaiseRC(env->pVM, rc);
1658 }
1659 else
1660 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1661 }
1662}
1663
1664
1665/**
1666 * Called from compiled code to run dma.
1667 *
1668 * @param env Pointer to the CPU environment.
1669 */
1670void remR3DmaRun(CPUState *env)
1671{
1672 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1673 PDMR3DmaRun(env->pVM);
1674 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1675}
1676
1677
1678/**
1679 * Called from compiled code to schedule pending timers in VMM
1680 *
1681 * @param env Pointer to the CPU environment.
1682 */
1683void remR3TimersRun(CPUState *env)
1684{
1685 LogFlow(("remR3TimersRun:\n"));
1686 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1687 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1688 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1689 TMR3TimerQueuesDo(env->pVM);
1690 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1691 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1692}
1693
1694
1695/**
1696 * Record trap occurance
1697 *
1698 * @returns VBox status code
1699 * @param env Pointer to the CPU environment.
1700 * @param uTrap Trap nr
1701 * @param uErrorCode Error code
1702 * @param pvNextEIP Next EIP
1703 */
1704int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1705{
1706 PVM pVM = env->pVM;
1707#ifdef VBOX_WITH_STATISTICS
1708 static STAMCOUNTER s_aStatTrap[255];
1709 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1710#endif
1711
1712#ifdef VBOX_WITH_STATISTICS
1713 if (uTrap < 255)
1714 {
1715 if (!s_aRegisters[uTrap])
1716 {
1717 char szStatName[64];
1718 s_aRegisters[uTrap] = true;
1719 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1720 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1721 }
1722 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1723 }
1724#endif
1725 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1726 if( uTrap < 0x20
1727 && (env->cr[0] & X86_CR0_PE)
1728 && !(env->eflags & X86_EFL_VM))
1729 {
1730#ifdef DEBUG
1731 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1732#endif
1733 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1734 {
1735 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1736 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1737 return VERR_REM_TOO_MANY_TRAPS;
1738 }
1739 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1740 pVM->rem.s.cPendingExceptions = 1;
1741 pVM->rem.s.uPendingException = uTrap;
1742 pVM->rem.s.uPendingExcptEIP = env->eip;
1743 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1744 }
1745 else
1746 {
1747 pVM->rem.s.cPendingExceptions = 0;
1748 pVM->rem.s.uPendingException = uTrap;
1749 pVM->rem.s.uPendingExcptEIP = env->eip;
1750 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1751 }
1752 return VINF_SUCCESS;
1753}
1754
1755
1756/*
1757 * Clear current active trap
1758 *
1759 * @param pVM VM Handle.
1760 */
1761void remR3TrapClear(PVM pVM)
1762{
1763 pVM->rem.s.cPendingExceptions = 0;
1764 pVM->rem.s.uPendingException = 0;
1765 pVM->rem.s.uPendingExcptEIP = 0;
1766 pVM->rem.s.uPendingExcptCR2 = 0;
1767}
1768
1769
1770/*
1771 * Record previous call instruction addresses
1772 *
1773 * @param env Pointer to the CPU environment.
1774 */
1775void remR3RecordCall(CPUState *env)
1776{
1777 CSAMR3RecordCallAddress(env->pVM, env->eip);
1778}
1779
1780
1781/**
1782 * Syncs the internal REM state with the VM.
1783 *
1784 * This must be called before REMR3Run() is invoked whenever when the REM
1785 * state is not up to date. Calling it several times in a row is not
1786 * permitted.
1787 *
1788 * @returns VBox status code.
1789 *
1790 * @param pVM VM Handle.
1791 * @param pVCpu VMCPU Handle.
1792 *
1793 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1794 * no do this since the majority of the callers don't want any unnecessary of events
1795 * pending that would immediatly interrupt execution.
1796 */
1797REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1798{
1799 register const CPUMCTX *pCtx;
1800 register unsigned fFlags;
1801 bool fHiddenSelRegsValid;
1802 unsigned i;
1803 TRPMEVENT enmType;
1804 uint8_t u8TrapNo;
1805 int rc;
1806
1807 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1808 Log2(("REMR3State:\n"));
1809
1810 pVM->rem.s.Env.pVCpu = pVCpu;
1811 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1812 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1813
1814 Assert(!pVM->rem.s.fInREM);
1815 pVM->rem.s.fInStateSync = true;
1816
1817 /*
1818 * If we have to flush TBs, do that immediately.
1819 */
1820 if (pVM->rem.s.fFlushTBs)
1821 {
1822 STAM_COUNTER_INC(&gStatFlushTBs);
1823 tb_flush(&pVM->rem.s.Env);
1824 pVM->rem.s.fFlushTBs = false;
1825 }
1826
1827 /*
1828 * Copy the registers which require no special handling.
1829 */
1830#ifdef TARGET_X86_64
1831 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1832 Assert(R_EAX == 0);
1833 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1834 Assert(R_ECX == 1);
1835 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1836 Assert(R_EDX == 2);
1837 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1838 Assert(R_EBX == 3);
1839 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1840 Assert(R_ESP == 4);
1841 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1842 Assert(R_EBP == 5);
1843 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1844 Assert(R_ESI == 6);
1845 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1846 Assert(R_EDI == 7);
1847 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1848 pVM->rem.s.Env.regs[8] = pCtx->r8;
1849 pVM->rem.s.Env.regs[9] = pCtx->r9;
1850 pVM->rem.s.Env.regs[10] = pCtx->r10;
1851 pVM->rem.s.Env.regs[11] = pCtx->r11;
1852 pVM->rem.s.Env.regs[12] = pCtx->r12;
1853 pVM->rem.s.Env.regs[13] = pCtx->r13;
1854 pVM->rem.s.Env.regs[14] = pCtx->r14;
1855 pVM->rem.s.Env.regs[15] = pCtx->r15;
1856
1857 pVM->rem.s.Env.eip = pCtx->rip;
1858
1859 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1860#else
1861 Assert(R_EAX == 0);
1862 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1863 Assert(R_ECX == 1);
1864 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1865 Assert(R_EDX == 2);
1866 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1867 Assert(R_EBX == 3);
1868 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1869 Assert(R_ESP == 4);
1870 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1871 Assert(R_EBP == 5);
1872 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1873 Assert(R_ESI == 6);
1874 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1875 Assert(R_EDI == 7);
1876 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1877 pVM->rem.s.Env.eip = pCtx->eip;
1878
1879 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1880#endif
1881
1882 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1883
1884 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1885 for (i=0;i<8;i++)
1886 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1887
1888 /*
1889 * Clear the halted hidden flag (the interrupt waking up the CPU can
1890 * have been dispatched in raw mode).
1891 */
1892 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1893
1894 /*
1895 * Replay invlpg?
1896 */
1897 if (pVM->rem.s.cInvalidatedPages)
1898 {
1899 RTUINT i;
1900
1901 pVM->rem.s.fIgnoreInvlPg = true;
1902 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1903 {
1904 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1905 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1906 }
1907 pVM->rem.s.fIgnoreInvlPg = false;
1908 pVM->rem.s.cInvalidatedPages = 0;
1909 }
1910
1911 /* Replay notification changes. */
1912 REMR3ReplayHandlerNotifications(pVM);
1913
1914 /* Update MSRs; before CRx registers! */
1915 pVM->rem.s.Env.efer = pCtx->msrEFER;
1916 pVM->rem.s.Env.star = pCtx->msrSTAR;
1917 pVM->rem.s.Env.pat = pCtx->msrPAT;
1918#ifdef TARGET_X86_64
1919 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1920 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1921 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1922 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1923
1924 /* Update the internal long mode activate flag according to the new EFER value. */
1925 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1926 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1927 else
1928 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1929#endif
1930
1931 /*
1932 * Registers which are rarely changed and require special handling / order when changed.
1933 */
1934 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1935 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1936 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1937 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1938 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1939 {
1940 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1941 {
1942 pVM->rem.s.fIgnoreCR3Load = true;
1943 tlb_flush(&pVM->rem.s.Env, true);
1944 pVM->rem.s.fIgnoreCR3Load = false;
1945 }
1946
1947 /* CR4 before CR0! */
1948 if (fFlags & CPUM_CHANGED_CR4)
1949 {
1950 pVM->rem.s.fIgnoreCR3Load = true;
1951 pVM->rem.s.fIgnoreCpuMode = true;
1952 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1953 pVM->rem.s.fIgnoreCpuMode = false;
1954 pVM->rem.s.fIgnoreCR3Load = false;
1955 }
1956
1957 if (fFlags & CPUM_CHANGED_CR0)
1958 {
1959 pVM->rem.s.fIgnoreCR3Load = true;
1960 pVM->rem.s.fIgnoreCpuMode = true;
1961 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1962 pVM->rem.s.fIgnoreCpuMode = false;
1963 pVM->rem.s.fIgnoreCR3Load = false;
1964 }
1965
1966 if (fFlags & CPUM_CHANGED_CR3)
1967 {
1968 pVM->rem.s.fIgnoreCR3Load = true;
1969 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1970 pVM->rem.s.fIgnoreCR3Load = false;
1971 }
1972
1973 if (fFlags & CPUM_CHANGED_GDTR)
1974 {
1975 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1976 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1977 }
1978
1979 if (fFlags & CPUM_CHANGED_IDTR)
1980 {
1981 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1982 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1983 }
1984
1985 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1986 {
1987 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1988 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1989 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1990 }
1991
1992 if (fFlags & CPUM_CHANGED_LDTR)
1993 {
1994 if (fHiddenSelRegsValid)
1995 {
1996 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1997 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1998 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1999 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2000 }
2001 else
2002 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2003 }
2004
2005 if (fFlags & CPUM_CHANGED_CPUID)
2006 {
2007 uint32_t u32Dummy;
2008
2009 /*
2010 * Get the CPUID features.
2011 */
2012 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2013 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2014 }
2015
2016 /* Sync FPU state after CR4, CPUID and EFER (!). */
2017 if (fFlags & CPUM_CHANGED_FPU_REM)
2018 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2019 }
2020
2021 /*
2022 * Sync TR unconditionally to make life simpler.
2023 */
2024 pVM->rem.s.Env.tr.selector = pCtx->tr;
2025 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2026 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2027 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2028 /* Note! do_interrupt will fault if the busy flag is still set... */
2029 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2030
2031 /*
2032 * Update selector registers.
2033 * This must be done *after* we've synced gdt, ldt and crX registers
2034 * since we're reading the GDT/LDT om sync_seg. This will happen with
2035 * saved state which takes a quick dip into rawmode for instance.
2036 */
2037 /*
2038 * Stack; Note first check this one as the CPL might have changed. The
2039 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2040 */
2041
2042 if (fHiddenSelRegsValid)
2043 {
2044 /* The hidden selector registers are valid in the CPU context. */
2045 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2046
2047 /* Set current CPL */
2048 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2049
2050 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2051 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2052 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2053 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2054 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2055 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2056 }
2057 else
2058 {
2059 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2060 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2061 {
2062 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2063
2064 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2065 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2066#ifdef VBOX_WITH_STATISTICS
2067 if (pVM->rem.s.Env.segs[R_SS].newselector)
2068 {
2069 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2070 }
2071#endif
2072 }
2073 else
2074 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2075
2076 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2077 {
2078 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2079 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2080#ifdef VBOX_WITH_STATISTICS
2081 if (pVM->rem.s.Env.segs[R_ES].newselector)
2082 {
2083 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2084 }
2085#endif
2086 }
2087 else
2088 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2089
2090 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2091 {
2092 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2093 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2094#ifdef VBOX_WITH_STATISTICS
2095 if (pVM->rem.s.Env.segs[R_CS].newselector)
2096 {
2097 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2098 }
2099#endif
2100 }
2101 else
2102 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2103
2104 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2105 {
2106 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2107 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2108#ifdef VBOX_WITH_STATISTICS
2109 if (pVM->rem.s.Env.segs[R_DS].newselector)
2110 {
2111 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2112 }
2113#endif
2114 }
2115 else
2116 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2117
2118 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2119 * be the same but not the base/limit. */
2120 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2121 {
2122 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2123 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2124#ifdef VBOX_WITH_STATISTICS
2125 if (pVM->rem.s.Env.segs[R_FS].newselector)
2126 {
2127 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2128 }
2129#endif
2130 }
2131 else
2132 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2133
2134 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2135 {
2136 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2137 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2138#ifdef VBOX_WITH_STATISTICS
2139 if (pVM->rem.s.Env.segs[R_GS].newselector)
2140 {
2141 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2142 }
2143#endif
2144 }
2145 else
2146 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2147 }
2148
2149 /*
2150 * Check for traps.
2151 */
2152 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2153 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2154 if (RT_SUCCESS(rc))
2155 {
2156#ifdef DEBUG
2157 if (u8TrapNo == 0x80)
2158 {
2159 remR3DumpLnxSyscall(pVCpu);
2160 remR3DumpOBsdSyscall(pVCpu);
2161 }
2162#endif
2163
2164 pVM->rem.s.Env.exception_index = u8TrapNo;
2165 if (enmType != TRPM_SOFTWARE_INT)
2166 {
2167 pVM->rem.s.Env.exception_is_int = 0;
2168 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2169 }
2170 else
2171 {
2172 /*
2173 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2174 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2175 * for int03 and into.
2176 */
2177 pVM->rem.s.Env.exception_is_int = 1;
2178 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2179 /* int 3 may be generated by one-byte 0xcc */
2180 if (u8TrapNo == 3)
2181 {
2182 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2183 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2184 }
2185 /* int 4 may be generated by one-byte 0xce */
2186 else if (u8TrapNo == 4)
2187 {
2188 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2189 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2190 }
2191 }
2192
2193 /* get error code and cr2 if needed. */
2194 switch (u8TrapNo)
2195 {
2196 case 0x0e:
2197 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2198 /* fallthru */
2199 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2200 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2201 break;
2202
2203 case 0x11: case 0x08:
2204 default:
2205 pVM->rem.s.Env.error_code = 0;
2206 break;
2207 }
2208
2209 /*
2210 * We can now reset the active trap since the recompiler is gonna have a go at it.
2211 */
2212 rc = TRPMResetTrap(pVCpu);
2213 AssertRC(rc);
2214 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2215 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2216 }
2217
2218 /*
2219 * Clear old interrupt request flags; Check for pending hardware interrupts.
2220 * (See @remark for why we don't check for other FFs.)
2221 */
2222 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2223 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2224 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2225 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2226
2227 /*
2228 * We're now in REM mode.
2229 */
2230 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2231 pVM->rem.s.fInREM = true;
2232 pVM->rem.s.fInStateSync = false;
2233 pVM->rem.s.cCanExecuteRaw = 0;
2234 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2235 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2236 return VINF_SUCCESS;
2237}
2238
2239
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * On return the VMCPU state has been flipped back to VMCPUSTATE_STARTED and
 * any exception left pending by the recompiler has been handed over to TRPM.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors still flagged stale ('newselector' != 0) when leaving REM. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle requires the TSS to be resynced. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: a base change must be signalled so SELM/TRPM can resync. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* LDTR: 0xF0FF keeps only the attribute bits (drops the limit 19:16 nibble
       of the raw 2nd descriptor dword QEmu stores in 'flags'). */
    if (    pCtx->ldtr != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                   ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                   : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-apply the busy bit; QEmu clears it while AMD/Intel expect it set for the active TSS. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * If the recompiler left an exception pending (index 0..255), hand it
     * over to TRPM together with the error code / fault address when the
     * vector takes one.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2490
2491
2492/**
2493 * This is called by the disassembler when it wants to update the cpu state
2494 * before for instance doing a register dump.
2495 */
2496static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2497{
2498 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2499 unsigned i;
2500
2501 Assert(pVM->rem.s.fInREM);
2502
2503 /*
2504 * Copy back the registers.
2505 * This is done in the order they are declared in the CPUMCTX structure.
2506 */
2507
2508 /** @todo FOP */
2509 /** @todo FPUIP */
2510 /** @todo CS */
2511 /** @todo FPUDP */
2512 /** @todo DS */
2513 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2514 pCtx->fpu.MXCSR = 0;
2515 pCtx->fpu.MXCSR_MASK = 0;
2516
2517 /** @todo check if FPU/XMM was actually used in the recompiler */
2518 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2519//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2520
2521#ifdef TARGET_X86_64
2522 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2523 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2524 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2525 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2526 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2527 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2528 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2529 pCtx->r8 = pVM->rem.s.Env.regs[8];
2530 pCtx->r9 = pVM->rem.s.Env.regs[9];
2531 pCtx->r10 = pVM->rem.s.Env.regs[10];
2532 pCtx->r11 = pVM->rem.s.Env.regs[11];
2533 pCtx->r12 = pVM->rem.s.Env.regs[12];
2534 pCtx->r13 = pVM->rem.s.Env.regs[13];
2535 pCtx->r14 = pVM->rem.s.Env.regs[14];
2536 pCtx->r15 = pVM->rem.s.Env.regs[15];
2537
2538 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2539#else
2540 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2541 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2542 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2543 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2544 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2545 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2546 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2547
2548 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2549#endif
2550
2551 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2552
2553 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2554 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2555 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2556 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2557 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2558
2559#ifdef TARGET_X86_64
2560 pCtx->rip = pVM->rem.s.Env.eip;
2561 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2562#else
2563 pCtx->eip = pVM->rem.s.Env.eip;
2564 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2565#endif
2566
2567 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2568 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2569 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2570 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2571 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2572 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2573
2574 for (i = 0; i < 8; i++)
2575 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2576
2577 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2578 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2579 {
2580 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2581 STAM_COUNTER_INC(&gStatREMGDTChange);
2582 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2583 }
2584
2585 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2586 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2587 {
2588 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2589 STAM_COUNTER_INC(&gStatREMIDTChange);
2590 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2591 }
2592
2593 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2594 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2595 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2596 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2597 {
2598 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2599 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2600 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2601 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2602 STAM_COUNTER_INC(&gStatREMLDTRChange);
2603 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2604 }
2605
2606 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2607 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2608 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2609 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2610 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2611 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2612 : 0) )
2613 {
2614 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2615 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2616 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2617 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2618 pCtx->tr = pVM->rem.s.Env.tr.selector;
2619 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2620 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2621 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2622 if (pCtx->trHid.Attr.u)
2623 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2624 STAM_COUNTER_INC(&gStatREMTRChange);
2625 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2626 }
2627
2628 /** @todo These values could still be out of sync! */
2629 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2630 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2631 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2632 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2633
2634 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2635 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2636 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2637
2638 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2639 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2640 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2641
2642 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2643 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2644 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2645
2646 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2647 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2648 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2649
2650 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2651 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2652 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2653
2654 /* Sysenter MSR */
2655 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2656 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2657 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2658
2659 /* System MSRs. */
2660 pCtx->msrEFER = pVM->rem.s.Env.efer;
2661 pCtx->msrSTAR = pVM->rem.s.Env.star;
2662 pCtx->msrPAT = pVM->rem.s.Env.pat;
2663#ifdef TARGET_X86_64
2664 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2665 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2666 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2667 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2668#endif
2669
2670}
2671
2672
2673/**
2674 * Update the VMM state information if we're currently in REM.
2675 *
2676 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2677 * we're currently executing in REM and the VMM state is invalid. This method will of
2678 * course check that we're executing in REM before syncing any data over to the VMM.
2679 *
2680 * @param pVM The VM handle.
2681 * @param pVCpu The VMCPU handle.
2682 */
2683REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2684{
2685 if (pVM->rem.s.fInREM)
2686 remR3StateUpdate(pVM, pVCpu);
2687}
2688
2689
2690#undef LOG_GROUP
2691#define LOG_GROUP LOG_GROUP_REM
2692
2693
/**
 * Notify the recompiler about Address Gate 20 state change.
 *
 * This notification is required since A20 gate changes are
 * initialized from a device driver and the VM might just as
 * well be in REM mode as in RAW mode.
 *
 * Must be called on the EMT.
 *
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   fEnable     True if the gate should be enabled.
 *                      False if the gate should be disabled.
 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Hold the ignore-all counter raised across the call so that any memory/TLB
       notifications cpu_x86_set_a20 triggers are disregarded by our own
       callbacks (presumably to avoid feedback into REM — same pattern as the
       fIgnore* flags used elsewhere in this file). */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2715
2716
/**
 * Replays the handler notification changes.
 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
 *
 * Pending notifications are queued on a lock-free LIFO list
 * (rem.s.idxPendingList); this routine detaches the whole list atomically,
 * reverses it so the notifications are processed in FIFO order, dispatches
 * each record to the matching remR3NotifyHandlerPhysical* worker and then
 * returns the record to the lock-free free list (rem.s.idxFreeList).
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;      /* record count, for sanity checking the list length */
#endif

        /* Lockless purging of pending notifications: atomically detach the whole
           list, leaving an empty (UINT32_MAX-terminated) list for producers. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         * (Producers push onto the head, so the detached list is newest-first.)
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t idxCur;
            Assert(--c >= 0);

            /* Dispatch the notification to the worker matching its kind. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (Lock-free push: CAS loop against concurrent producers popping entries.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

/* Temporarily turned on for release builds to investigate #4113 */
#if 1 //def VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
2841
2842
2843/**
2844 * Notify REM about changed code page.
2845 *
2846 * @returns VBox status code.
2847 * @param pVM VM handle.
2848 * @param pVCpu VMCPU handle.
2849 * @param pvCodePage Code page address
2850 */
2851REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
2852{
2853#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2854 int rc;
2855 RTGCPHYS PhysGC;
2856 uint64_t flags;
2857
2858 VM_ASSERT_EMT(pVM);
2859
2860 /*
2861 * Get the physical page address.
2862 */
2863 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2864 if (rc == VINF_SUCCESS)
2865 {
2866 /*
2867 * Sync the required registers and flush the whole page.
2868 * (Easier to do the whole page than notifying it about each physical
2869 * byte that was changed.
2870 */
2871 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2872 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2873 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2874 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2875
2876 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2877 }
2878#endif
2879 return VINF_SUCCESS;
2880}
2881
2882
2883/**
2884 * Notification about a successful MMR3PhysRegister() call.
2885 *
2886 * @param pVM VM handle.
2887 * @param GCPhys The physical address the RAM.
2888 * @param cb Size of the memory.
2889 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2890 */
2891REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2892{
2893 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2894 VM_ASSERT_EMT(pVM);
2895
2896 /*
2897 * Validate input - we trust the caller.
2898 */
2899 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2900 Assert(cb);
2901 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2902 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2903
2904 /*
2905 * Base ram? Update GCPhysLastRam.
2906 */
2907 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2908 {
2909 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2910 {
2911 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2912 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2913 }
2914 }
2915
2916 /*
2917 * Register the ram.
2918 */
2919 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2920
2921 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2922 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2923 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2924
2925 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2926}
2927
2928
2929/**
2930 * Notification about a successful MMR3PhysRomRegister() call.
2931 *
2932 * @param pVM VM handle.
2933 * @param GCPhys The physical address of the ROM.
2934 * @param cb The size of the ROM.
2935 * @param pvCopy Pointer to the ROM copy.
2936 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2937 * This function will be called when ever the protection of the
2938 * shadow ROM changes (at reset and end of POST).
2939 */
2940REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2941{
2942 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2943 VM_ASSERT_EMT(pVM);
2944
2945 /*
2946 * Validate input - we trust the caller.
2947 */
2948 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2949 Assert(cb);
2950 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2951
2952 /*
2953 * Register the rom.
2954 */
2955 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2956
2957 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2958 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2959 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2960
2961 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2962}
2963
2964
2965/**
2966 * Notification about a successful memory deregistration or reservation.
2967 *
2968 * @param pVM VM Handle.
2969 * @param GCPhys Start physical address.
2970 * @param cb The size of the range.
2971 */
2972REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2973{
2974 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2975 VM_ASSERT_EMT(pVM);
2976
2977 /*
2978 * Validate input - we trust the caller.
2979 */
2980 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2981 Assert(cb);
2982 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2983
2984 /*
2985 * Unassigning the memory.
2986 */
2987 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2988
2989 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2990 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2991 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2992
2993 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2994}
2995
2996
2997/**
2998 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2999 *
3000 * @param pVM VM Handle.
3001 * @param enmType Handler type.
3002 * @param GCPhys Handler range address.
3003 * @param cb Size of the handler range.
3004 * @param fHasHCHandler Set if the handler has a HC callback function.
3005 *
3006 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3007 * Handler memory type to memory which has no HC handler.
3008 */
3009static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3010{
3011 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3012 enmType, GCPhys, cb, fHasHCHandler));
3013
3014 VM_ASSERT_EMT(pVM);
3015 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3016 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3017
3018
3019 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3020
3021 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3022 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3023 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3024 else if (fHasHCHandler)
3025 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3026 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3027
3028 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3029}
3030
3031/**
3032 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3033 *
3034 * @param pVM VM Handle.
3035 * @param enmType Handler type.
3036 * @param GCPhys Handler range address.
3037 * @param cb Size of the handler range.
3038 * @param fHasHCHandler Set if the handler has a HC callback function.
3039 *
3040 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3041 * Handler memory type to memory which has no HC handler.
3042 */
3043REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3044{
3045 REMR3ReplayHandlerNotifications(pVM);
3046
3047 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3048}
3049
3050/**
3051 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3052 *
3053 * @param pVM VM Handle.
3054 * @param enmType Handler type.
3055 * @param GCPhys Handler range address.
3056 * @param cb Size of the handler range.
3057 * @param fHasHCHandler Set if the handler has a HC callback function.
3058 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3059 */
3060static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3061{
3062 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3063 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3064 VM_ASSERT_EMT(pVM);
3065
3066
3067 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3068
3069 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3070 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3071 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3072 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3073 else if (fHasHCHandler)
3074 {
3075 if (!fRestoreAsRAM)
3076 {
3077 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3078 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3079 }
3080 else
3081 {
3082 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3083 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3084 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3085 }
3086 }
3087 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3088
3089 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3090}
3091
3092/**
3093 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3094 *
3095 * @param pVM VM Handle.
3096 * @param enmType Handler type.
3097 * @param GCPhys Handler range address.
3098 * @param cb Size of the handler range.
3099 * @param fHasHCHandler Set if the handler has a HC callback function.
3100 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3101 */
3102REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3103{
3104 REMR3ReplayHandlerNotifications(pVM);
3105 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3106}
3107
3108
3109/**
3110 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3111 *
3112 * @param pVM VM Handle.
3113 * @param enmType Handler type.
3114 * @param GCPhysOld Old handler range address.
3115 * @param GCPhysNew New handler range address.
3116 * @param cb Size of the handler range.
3117 * @param fHasHCHandler Set if the handler has a HC callback function.
3118 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3119 */
3120static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3121{
3122 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3123 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3124 VM_ASSERT_EMT(pVM);
3125 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3126
3127 if (fHasHCHandler)
3128 {
3129 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3130
3131 /*
3132 * Reset the old page.
3133 */
3134 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3135 if (!fRestoreAsRAM)
3136 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3137 else
3138 {
3139 /* This is not perfect, but it'll do for PD monitoring... */
3140 Assert(cb == PAGE_SIZE);
3141 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3142 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3143 }
3144
3145 /*
3146 * Update the new page.
3147 */
3148 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3149 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3150 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3151 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3152
3153 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3154 }
3155}
3156
3157/**
3158 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3159 *
3160 * @param pVM VM Handle.
3161 * @param enmType Handler type.
3162 * @param GCPhysOld Old handler range address.
3163 * @param GCPhysNew New handler range address.
3164 * @param cb Size of the handler range.
3165 * @param fHasHCHandler Set if the handler has a HC callback function.
3166 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3167 */
3168REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3169{
3170 REMR3ReplayHandlerNotifications(pVM);
3171
3172 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3173}
3174
3175/**
3176 * Checks if we're handling access to this page or not.
3177 *
3178 * @returns true if we're trapping access.
3179 * @returns false if we aren't.
3180 * @param pVM The VM handle.
3181 * @param GCPhys The physical address.
3182 *
3183 * @remark This function will only work correctly in VBOX_STRICT builds!
3184 */
3185REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3186{
3187#ifdef VBOX_STRICT
3188 unsigned long off;
3189 REMR3ReplayHandlerNotifications(pVM);
3190
3191 off = get_phys_page_offset(GCPhys);
3192 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3193 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3194 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3195#else
3196 return false;
3197#endif
3198}
3199
3200
3201/**
3202 * Deals with a rare case in get_phys_addr_code where the code
3203 * is being monitored.
3204 *
3205 * It could also be an MMIO page, in which case we will raise a fatal error.
3206 *
3207 * @returns The physical address corresponding to addr.
3208 * @param env The cpu environment.
3209 * @param addr The virtual address.
3210 * @param pTLBEntry The TLB entry.
3211 */
3212target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3213 target_ulong addr,
3214 CPUTLBEntry* pTLBEntry,
3215 target_phys_addr_t ioTLBEntry)
3216{
3217 PVM pVM = env->pVM;
3218
3219 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3220 {
3221 /* If code memory is being monitored, appropriate IOTLB entry will have
3222 handler IO type, and addend will provide real physical address, no
3223 matter if we store VA in TLB or not, as handlers are always passed PA */
3224 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3225 return ret;
3226 }
3227 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3228 "*** handlers\n",
3229 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3230 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3231 LogRel(("*** mmio\n"));
3232 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3233 LogRel(("*** phys\n"));
3234 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3235 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3236 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3237 AssertFatalFailed();
3238}
3239
3240/**
3241 * Read guest RAM and ROM.
3242 *
3243 * @param SrcGCPhys The source address (guest physical).
3244 * @param pvDst The destination address.
3245 * @param cb Number of bytes
3246 */
3247void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3248{
3249 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3250 VBOX_CHECK_ADDR(SrcGCPhys);
3251 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3252#ifdef VBOX_DEBUG_PHYS
3253 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3254#endif
3255 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3256}
3257
3258
3259/**
3260 * Read guest RAM and ROM, unsigned 8-bit.
3261 *
3262 * @param SrcGCPhys The source address (guest physical).
3263 */
3264RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3265{
3266 uint8_t val;
3267 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3268 VBOX_CHECK_ADDR(SrcGCPhys);
3269 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3270 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3271#ifdef VBOX_DEBUG_PHYS
3272 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3273#endif
3274 return val;
3275}
3276
3277
3278/**
3279 * Read guest RAM and ROM, signed 8-bit.
3280 *
3281 * @param SrcGCPhys The source address (guest physical).
3282 */
3283RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3284{
3285 int8_t val;
3286 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3287 VBOX_CHECK_ADDR(SrcGCPhys);
3288 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3289 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3290#ifdef VBOX_DEBUG_PHYS
3291 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3292#endif
3293 return val;
3294}
3295
3296
3297/**
3298 * Read guest RAM and ROM, unsigned 16-bit.
3299 *
3300 * @param SrcGCPhys The source address (guest physical).
3301 */
3302RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3303{
3304 uint16_t val;
3305 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3306 VBOX_CHECK_ADDR(SrcGCPhys);
3307 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3308 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3309#ifdef VBOX_DEBUG_PHYS
3310 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3311#endif
3312 return val;
3313}
3314
3315
3316/**
3317 * Read guest RAM and ROM, signed 16-bit.
3318 *
3319 * @param SrcGCPhys The source address (guest physical).
3320 */
3321RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3322{
3323 int16_t val;
3324 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3325 VBOX_CHECK_ADDR(SrcGCPhys);
3326 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3327 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3328#ifdef VBOX_DEBUG_PHYS
3329 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3330#endif
3331 return val;
3332}
3333
3334
3335/**
3336 * Read guest RAM and ROM, unsigned 32-bit.
3337 *
3338 * @param SrcGCPhys The source address (guest physical).
3339 */
3340RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3341{
3342 uint32_t val;
3343 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3344 VBOX_CHECK_ADDR(SrcGCPhys);
3345 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3346 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3347#ifdef VBOX_DEBUG_PHYS
3348 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3349#endif
3350 return val;
3351}
3352
3353
3354/**
3355 * Read guest RAM and ROM, signed 32-bit.
3356 *
3357 * @param SrcGCPhys The source address (guest physical).
3358 */
3359RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3360{
3361 int32_t val;
3362 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3363 VBOX_CHECK_ADDR(SrcGCPhys);
3364 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3365 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3366#ifdef VBOX_DEBUG_PHYS
3367 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3368#endif
3369 return val;
3370}
3371
3372
3373/**
3374 * Read guest RAM and ROM, unsigned 64-bit.
3375 *
3376 * @param SrcGCPhys The source address (guest physical).
3377 */
3378uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3379{
3380 uint64_t val;
3381 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3382 VBOX_CHECK_ADDR(SrcGCPhys);
3383 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3384 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3385#ifdef VBOX_DEBUG_PHYS
3386 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3387#endif
3388 return val;
3389}
3390
3391
3392/**
3393 * Read guest RAM and ROM, signed 64-bit.
3394 *
3395 * @param SrcGCPhys The source address (guest physical).
3396 */
3397int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3398{
3399 int64_t val;
3400 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3401 VBOX_CHECK_ADDR(SrcGCPhys);
3402 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3403 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3404#ifdef VBOX_DEBUG_PHYS
3405 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3406#endif
3407 return val;
3408}
3409
3410
3411/**
3412 * Write guest RAM.
3413 *
3414 * @param DstGCPhys The destination address (guest physical).
3415 * @param pvSrc The source address.
3416 * @param cb Number of bytes to write
3417 */
3418void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3419{
3420 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3421 VBOX_CHECK_ADDR(DstGCPhys);
3422 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3423 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3424#ifdef VBOX_DEBUG_PHYS
3425 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3426#endif
3427}
3428
3429
3430/**
3431 * Write guest RAM, unsigned 8-bit.
3432 *
3433 * @param DstGCPhys The destination address (guest physical).
3434 * @param val Value
3435 */
3436void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3437{
3438 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3439 VBOX_CHECK_ADDR(DstGCPhys);
3440 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3441 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3442#ifdef VBOX_DEBUG_PHYS
3443 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3444#endif
3445}
3446
3447
3448/**
3449 * Write guest RAM, unsigned 8-bit.
3450 *
3451 * @param DstGCPhys The destination address (guest physical).
3452 * @param val Value
3453 */
3454void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3455{
3456 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3457 VBOX_CHECK_ADDR(DstGCPhys);
3458 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3459 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3460#ifdef VBOX_DEBUG_PHYS
3461 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3462#endif
3463}
3464
3465
3466/**
3467 * Write guest RAM, unsigned 32-bit.
3468 *
3469 * @param DstGCPhys The destination address (guest physical).
3470 * @param val Value
3471 */
3472void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3473{
3474 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3475 VBOX_CHECK_ADDR(DstGCPhys);
3476 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3477 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3478#ifdef VBOX_DEBUG_PHYS
3479 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3480#endif
3481}
3482
3483
3484/**
3485 * Write guest RAM, unsigned 64-bit.
3486 *
3487 * @param DstGCPhys The destination address (guest physical).
3488 * @param val Value
3489 */
3490void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3491{
3492 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3493 VBOX_CHECK_ADDR(DstGCPhys);
3494 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3495 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3496#ifdef VBOX_DEBUG_PHYS
3497 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3498#endif
3499}
3500
3501#undef LOG_GROUP
3502#define LOG_GROUP LOG_GROUP_REM_MMIO
3503
3504/** Read MMIO memory. */
3505static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3506{
3507 uint32_t u32 = 0;
3508 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3509 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3510 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3511 return u32;
3512}
3513
3514/** Read MMIO memory. */
3515static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3516{
3517 uint32_t u32 = 0;
3518 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3519 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3520 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3521 return u32;
3522}
3523
3524/** Read MMIO memory. */
3525static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3526{
3527 uint32_t u32 = 0;
3528 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3529 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3530 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3531 return u32;
3532}
3533
3534/** Write to MMIO memory. */
3535static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3536{
3537 int rc;
3538 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3539 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3540 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3541}
3542
3543/** Write to MMIO memory. */
3544static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3545{
3546 int rc;
3547 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3548 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3549 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3550}
3551
3552/** Write to MMIO memory. */
3553static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3554{
3555 int rc;
3556 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3557 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3558 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3559}
3560
3561
3562#undef LOG_GROUP
3563#define LOG_GROUP LOG_GROUP_REM_HANDLER
3564
3565/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3566
3567static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3568{
3569 uint8_t u8;
3570 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3571 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3572 return u8;
3573}
3574
3575static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3576{
3577 uint16_t u16;
3578 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3579 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3580 return u16;
3581}
3582
3583static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3584{
3585 uint32_t u32;
3586 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3587 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3588 return u32;
3589}
3590
/** Handler-memory byte write, routed through PGM so registered handlers fire.
 *  NOTE(review): writes the low byte of u32 by passing &u32 with a one-byte
 *  size - only correct on little-endian hosts; presumably intentional here. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3596
/** Handler-memory word write, routed through PGM so registered handlers fire.
 *  NOTE(review): passes &u32 with a two-byte size - little-endian host assumption. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3602
/** Handler-memory dword write, routed through PGM so registered handlers fire. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3608
3609/* -+- disassembly -+- */
3610
3611#undef LOG_GROUP
3612#define LOG_GROUP LOG_GROUP_REM_DISAS
3613
3614
3615/**
3616 * Enables or disables singled stepped disassembly.
3617 *
3618 * @returns VBox status code.
3619 * @param pVM VM handle.
3620 * @param fEnable To enable set this flag, to disable clear it.
3621 */
3622static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3623{
3624 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3625 VM_ASSERT_EMT(pVM);
3626
3627 if (fEnable)
3628 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3629 else
3630 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3631 return VINF_SUCCESS;
3632}
3633
3634
3635/**
3636 * Enables or disables singled stepped disassembly.
3637 *
3638 * @returns VBox status code.
3639 * @param pVM VM handle.
3640 * @param fEnable To enable set this flag, to disable clear it.
3641 */
3642REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3643{
3644 int rc;
3645
3646 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3647 if (VM_IS_EMT(pVM))
3648 return remR3DisasEnableStepping(pVM, fEnable);
3649
3650 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3651 AssertRC(rc);
3652 return rc;
3653}
3654
3655
3656#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3657/**
3658 * External Debugger Command: .remstep [on|off|1|0]
3659 */
3660static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3661{
3662 bool fEnable;
3663 int rc;
3664
3665 /* print status */
3666 if (cArgs == 0)
3667 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3668 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3669
3670 /* convert the argument and change the mode. */
3671 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3672 if (RT_FAILURE(rc))
3673 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3674 rc = REMR3DisasEnableStepping(pVM, fEnable);
3675 if (RT_FAILURE(rc))
3676 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3677 return rc;
3678}
3679#endif
3680
3681
3682/**
3683 * Disassembles one instruction and prints it to the log.
3684 *
3685 * @returns Success indicator.
3686 * @param env Pointer to the recompiler CPU structure.
3687 * @param f32BitCode Indicates that whether or not the code should
3688 * be disassembled as 16 or 32 bit. If -1 the CS
3689 * selector will be inspected.
3690 * @param pszPrefix
3691 */
3692bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3693{
3694 PVM pVM = env->pVM;
3695 const bool fLog = LogIsEnabled();
3696 const bool fLog2 = LogIs2Enabled();
3697 int rc = VINF_SUCCESS;
3698
3699 /*
3700 * Don't bother if there ain't any log output to do.
3701 */
3702 if (!fLog && !fLog2)
3703 return true;
3704
3705 /*
3706 * Update the state so DBGF reads the correct register values.
3707 */
3708 remR3StateUpdate(pVM, env->pVCpu);
3709
3710 /*
3711 * Log registers if requested.
3712 */
3713 if (!fLog2)
3714 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3715
3716 /*
3717 * Disassemble to log.
3718 */
3719 if (fLog)
3720 rc = DBGFR3DisasInstrCurrentLogInternal(env->pVCpu, pszPrefix);
3721
3722 return RT_SUCCESS(rc);
3723}
3724
3725
3726/**
3727 * Disassemble recompiled code.
3728 *
3729 * @param phFileIgnored Ignored, logfile usually.
3730 * @param pvCode Pointer to the code block.
3731 * @param cb Size of the code block.
3732 */
3733void disas(FILE *phFile, void *pvCode, unsigned long cb)
3734{
3735#ifdef DEBUG_TMP_LOGGING
3736# define DISAS_PRINTF(x...) fprintf(phFile, x)
3737#else
3738# define DISAS_PRINTF(x...) RTLogPrintf(x)
3739 if (LogIs2Enabled())
3740#endif
3741 {
3742 unsigned off = 0;
3743 char szOutput[256];
3744 DISCPUSTATE Cpu;
3745
3746 memset(&Cpu, 0, sizeof(Cpu));
3747#ifdef RT_ARCH_X86
3748 Cpu.mode = CPUMODE_32BIT;
3749#else
3750 Cpu.mode = CPUMODE_64BIT;
3751#endif
3752
3753 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3754 while (off < cb)
3755 {
3756 uint32_t cbInstr;
3757 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3758 DISAS_PRINTF("%s", szOutput);
3759 else
3760 {
3761 DISAS_PRINTF("disas error\n");
3762 cbInstr = 1;
3763#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3764 break;
3765#endif
3766 }
3767 off += cbInstr;
3768 }
3769 }
3770
3771#undef DISAS_PRINTF
3772}
3773
3774
3775/**
3776 * Disassemble guest code.
3777 *
3778 * @param phFileIgnored Ignored, logfile usually.
3779 * @param uCode The guest address of the code to disassemble. (flat?)
3780 * @param cb Number of bytes to disassemble.
3781 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3782 */
3783void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3784{
3785#ifdef DEBUG_TMP_LOGGING
3786# define DISAS_PRINTF(x...) fprintf(phFile, x)
3787#else
3788# define DISAS_PRINTF(x...) RTLogPrintf(x)
3789 if (LogIs2Enabled())
3790#endif
3791 {
3792 PVM pVM = cpu_single_env->pVM;
3793 PVMCPU pVCpu = cpu_single_env->pVCpu;
3794 RTSEL cs;
3795 RTGCUINTPTR eip;
3796
3797 Assert(pVCpu);
3798
3799 /*
3800 * Update the state so DBGF reads the correct register values (flags).
3801 */
3802 remR3StateUpdate(pVM, pVCpu);
3803
3804 /*
3805 * Do the disassembling.
3806 */
3807 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3808 cs = cpu_single_env->segs[R_CS].selector;
3809 eip = uCode - cpu_single_env->segs[R_CS].base;
3810 for (;;)
3811 {
3812 char szBuf[256];
3813 uint32_t cbInstr;
3814 int rc = DBGFR3DisasInstrEx(pVM,
3815 pVCpu->idCpu,
3816 cs,
3817 eip,
3818 0,
3819 szBuf, sizeof(szBuf),
3820 &cbInstr);
3821 if (RT_SUCCESS(rc))
3822 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3823 else
3824 {
3825 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3826 cbInstr = 1;
3827 }
3828
3829 /* next */
3830 if (cb <= cbInstr)
3831 break;
3832 cb -= cbInstr;
3833 uCode += cbInstr;
3834 eip += cbInstr;
3835 }
3836 }
3837#undef DISAS_PRINTF
3838}
3839
3840
3841/**
3842 * Looks up a guest symbol.
3843 *
3844 * @returns Pointer to symbol name. This is a static buffer.
3845 * @param orig_addr The address in question.
3846 */
3847const char *lookup_symbol(target_ulong orig_addr)
3848{
3849 PVM pVM = cpu_single_env->pVM;
3850 RTGCINTPTR off = 0;
3851 RTDBGSYMBOL Sym;
3852 DBGFADDRESS Addr;
3853
3854 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
3855 if (RT_SUCCESS(rc))
3856 {
3857 static char szSym[sizeof(Sym.szName) + 48];
3858 if (!off)
3859 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3860 else if (off > 0)
3861 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3862 else
3863 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3864 return szSym;
3865 }
3866 return "<N/A>";
3867}
3868
3869
3870#undef LOG_GROUP
3871#define LOG_GROUP LOG_GROUP_REM
3872
3873
3874/* -+- FF notifications -+- */
3875
3876
3877/**
3878 * Notification about a pending interrupt.
3879 *
3880 * @param pVM VM Handle.
3881 * @param pVCpu VMCPU Handle.
3882 * @param u8Interrupt Interrupt
3883 * @thread The emulation thread.
3884 */
3885REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
3886{
3887 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3888 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3889}
3890
3891/**
3892 * Notification about a pending interrupt.
3893 *
3894 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3895 * @param pVM VM Handle.
3896 * @param pVCpu VMCPU Handle.
3897 * @thread The emulation thread.
3898 */
3899REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
3900{
3901 return pVM->rem.s.u32PendingInterrupt;
3902}
3903
3904/**
3905 * Notification about the interrupt FF being set.
3906 *
3907 * @param pVM VM Handle.
3908 * @param pVCpu VMCPU Handle.
3909 * @thread The emulation thread.
3910 */
3911REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3912{
3913 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3914 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3915 if (pVM->rem.s.fInREM)
3916 {
3917 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3918 CPU_INTERRUPT_EXTERNAL_HARD);
3919 }
3920}
3921
3922
3923/**
3924 * Notification about the interrupt FF being set.
3925 *
3926 * @param pVM VM Handle.
3927 * @param pVCpu VMCPU Handle.
3928 * @thread Any.
3929 */
3930REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3931{
3932 LogFlow(("REMR3NotifyInterruptClear:\n"));
3933 if (pVM->rem.s.fInREM)
3934 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3935}
3936
3937
3938/**
3939 * Notification about pending timer(s).
3940 *
3941 * @param pVM VM Handle.
3942 * @param pVCpuDst The target cpu for this notification.
3943 * TM will not broadcast pending timer events, but use
3944 * a decidated EMT for them. So, only interrupt REM
3945 * execution if the given CPU is executing in REM.
3946 * @thread Any.
3947 */
3948REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3949{
3950#ifndef DEBUG_bird
3951 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3952#endif
3953 if (pVM->rem.s.fInREM)
3954 {
3955 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3956 {
3957 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3958 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3959 CPU_INTERRUPT_EXTERNAL_TIMER);
3960 }
3961 else
3962 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
3963 }
3964 else
3965 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
3966}
3967
3968
3969/**
3970 * Notification about pending DMA transfers.
3971 *
3972 * @param pVM VM Handle.
3973 * @thread Any.
3974 */
3975REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3976{
3977 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3978 if (pVM->rem.s.fInREM)
3979 {
3980 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3981 CPU_INTERRUPT_EXTERNAL_DMA);
3982 }
3983}
3984
3985
3986/**
3987 * Notification about pending timer(s).
3988 *
3989 * @param pVM VM Handle.
3990 * @thread Any.
3991 */
3992REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3993{
3994 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3995 if (pVM->rem.s.fInREM)
3996 {
3997 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3998 CPU_INTERRUPT_EXTERNAL_EXIT);
3999 }
4000}
4001
4002
4003/**
4004 * Notification about pending FF set by an external thread.
4005 *
4006 * @param pVM VM handle.
4007 * @thread Any.
4008 */
4009REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4010{
4011 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4012 if (pVM->rem.s.fInREM)
4013 {
4014 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4015 CPU_INTERRUPT_EXTERNAL_EXIT);
4016 }
4017}
4018
4019
4020#ifdef VBOX_WITH_STATISTICS
4021void remR3ProfileStart(int statcode)
4022{
4023 STAMPROFILEADV *pStat;
4024 switch(statcode)
4025 {
4026 case STATS_EMULATE_SINGLE_INSTR:
4027 pStat = &gStatExecuteSingleInstr;
4028 break;
4029 case STATS_QEMU_COMPILATION:
4030 pStat = &gStatCompilationQEmu;
4031 break;
4032 case STATS_QEMU_RUN_EMULATED_CODE:
4033 pStat = &gStatRunCodeQEmu;
4034 break;
4035 case STATS_QEMU_TOTAL:
4036 pStat = &gStatTotalTimeQEmu;
4037 break;
4038 case STATS_QEMU_RUN_TIMERS:
4039 pStat = &gStatTimers;
4040 break;
4041 case STATS_TLB_LOOKUP:
4042 pStat= &gStatTBLookup;
4043 break;
4044 case STATS_IRQ_HANDLING:
4045 pStat= &gStatIRQ;
4046 break;
4047 case STATS_RAW_CHECK:
4048 pStat = &gStatRawCheck;
4049 break;
4050
4051 default:
4052 AssertMsgFailed(("unknown stat %d\n", statcode));
4053 return;
4054 }
4055 STAM_PROFILE_ADV_START(pStat, a);
4056}
4057
4058
4059void remR3ProfileStop(int statcode)
4060{
4061 STAMPROFILEADV *pStat;
4062 switch(statcode)
4063 {
4064 case STATS_EMULATE_SINGLE_INSTR:
4065 pStat = &gStatExecuteSingleInstr;
4066 break;
4067 case STATS_QEMU_COMPILATION:
4068 pStat = &gStatCompilationQEmu;
4069 break;
4070 case STATS_QEMU_RUN_EMULATED_CODE:
4071 pStat = &gStatRunCodeQEmu;
4072 break;
4073 case STATS_QEMU_TOTAL:
4074 pStat = &gStatTotalTimeQEmu;
4075 break;
4076 case STATS_QEMU_RUN_TIMERS:
4077 pStat = &gStatTimers;
4078 break;
4079 case STATS_TLB_LOOKUP:
4080 pStat= &gStatTBLookup;
4081 break;
4082 case STATS_IRQ_HANDLING:
4083 pStat= &gStatIRQ;
4084 break;
4085 case STATS_RAW_CHECK:
4086 pStat = &gStatRawCheck;
4087 break;
4088 default:
4089 AssertMsgFailed(("unknown stat %d\n", statcode));
4090 return;
4091 }
4092 STAM_PROFILE_ADV_STOP(pStat, a);
4093}
4094#endif
4095
4096/**
4097 * Raise an RC, force rem exit.
4098 *
4099 * @param pVM VM handle.
4100 * @param rc The rc.
4101 */
4102void remR3RaiseRC(PVM pVM, int rc)
4103{
4104 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4105 Assert(pVM->rem.s.fInREM);
4106 VM_ASSERT_EMT(pVM);
4107 pVM->rem.s.rc = rc;
4108 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4109}
4110
4111
4112/* -+- timers -+- */
4113
4114uint64_t cpu_get_tsc(CPUX86State *env)
4115{
4116 STAM_COUNTER_INC(&gStatCpuGetTSC);
4117 return TMCpuTickGet(env->pVCpu);
4118}
4119
4120
4121/* -+- interrupts -+- */
4122
4123void cpu_set_ferr(CPUX86State *env)
4124{
4125 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4126 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4127}
4128
4129int cpu_get_pic_interrupt(CPUState *env)
4130{
4131 uint8_t u8Interrupt;
4132 int rc;
4133
4134 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4135 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4136 * with the (a)pic.
4137 */
4138 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
4139 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4140 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4141 * remove this kludge. */
4142 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4143 {
4144 rc = VINF_SUCCESS;
4145 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4146 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4147 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4148 }
4149 else
4150 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4151
4152 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
4153 if (RT_SUCCESS(rc))
4154 {
4155 if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4156 env->interrupt_request |= CPU_INTERRUPT_HARD;
4157 return u8Interrupt;
4158 }
4159 return -1;
4160}
4161
4162
4163/* -+- local apic -+- */
4164
4165void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4166{
4167 int rc = PDMApicSetBase(env->pVM, val);
4168 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4169}
4170
4171uint64_t cpu_get_apic_base(CPUX86State *env)
4172{
4173 uint64_t u64;
4174 int rc = PDMApicGetBase(env->pVM, &u64);
4175 if (RT_SUCCESS(rc))
4176 {
4177 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4178 return u64;
4179 }
4180 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4181 return 0;
4182}
4183
4184void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4185{
4186 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4187 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4188}
4189
4190uint8_t cpu_get_apic_tpr(CPUX86State *env)
4191{
4192 uint8_t u8;
4193 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4194 if (RT_SUCCESS(rc))
4195 {
4196 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4197 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4198 }
4199 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4200 return 0;
4201}
4202
4203
4204uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4205{
4206 uint64_t value;
4207 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4208 if (RT_SUCCESS(rc))
4209 {
4210 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4211 return value;
4212 }
4213 /** @todo: exception ? */
4214 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4215 return value;
4216}
4217
4218void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
4219{
4220 int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
4221 /** @todo: exception if error ? */
4222 LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
4223}
4224
4225uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
4226{
4227 Assert(env->pVCpu);
4228 return CPUMGetGuestMsr(env->pVCpu, msr);
4229}
4230
4231void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
4232{
4233 Assert(env->pVCpu);
4234 CPUMSetGuestMsr(env->pVCpu, msr, val);
4235}
4236
4237/* -+- I/O Ports -+- */
4238
4239#undef LOG_GROUP
4240#define LOG_GROUP LOG_GROUP_REM_IOPORT
4241
4242void cpu_outb(CPUState *env, int addr, int val)
4243{
4244 int rc;
4245
4246 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4247 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4248
4249 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4250 if (RT_LIKELY(rc == VINF_SUCCESS))
4251 return;
4252 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4253 {
4254 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4255 remR3RaiseRC(env->pVM, rc);
4256 return;
4257 }
4258 remAbort(rc, __FUNCTION__);
4259}
4260
4261void cpu_outw(CPUState *env, int addr, int val)
4262{
4263 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4264 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4265 if (RT_LIKELY(rc == VINF_SUCCESS))
4266 return;
4267 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4268 {
4269 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4270 remR3RaiseRC(env->pVM, rc);
4271 return;
4272 }
4273 remAbort(rc, __FUNCTION__);
4274}
4275
4276void cpu_outl(CPUState *env, int addr, int val)
4277{
4278 int rc;
4279 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4280 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4281 if (RT_LIKELY(rc == VINF_SUCCESS))
4282 return;
4283 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4284 {
4285 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4286 remR3RaiseRC(env->pVM, rc);
4287 return;
4288 }
4289 remAbort(rc, __FUNCTION__);
4290}
4291
4292int cpu_inb(CPUState *env, int addr)
4293{
4294 uint32_t u32 = 0;
4295 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4296 if (RT_LIKELY(rc == VINF_SUCCESS))
4297 {
4298 if (/*addr != 0x61 && */addr != 0x71)
4299 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4300 return (int)u32;
4301 }
4302 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4303 {
4304 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4305 remR3RaiseRC(env->pVM, rc);
4306 return (int)u32;
4307 }
4308 remAbort(rc, __FUNCTION__);
4309 return 0xff;
4310}
4311
4312int cpu_inw(CPUState *env, int addr)
4313{
4314 uint32_t u32 = 0;
4315 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4316 if (RT_LIKELY(rc == VINF_SUCCESS))
4317 {
4318 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4319 return (int)u32;
4320 }
4321 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4322 {
4323 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4324 remR3RaiseRC(env->pVM, rc);
4325 return (int)u32;
4326 }
4327 remAbort(rc, __FUNCTION__);
4328 return 0xffff;
4329}
4330
4331int cpu_inl(CPUState *env, int addr)
4332{
4333 uint32_t u32 = 0;
4334 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4335 if (RT_LIKELY(rc == VINF_SUCCESS))
4336 {
4337//if (addr==0x01f0 && u32 == 0x6b6d)
4338// loglevel = ~0;
4339 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4340 return (int)u32;
4341 }
4342 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4343 {
4344 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4345 remR3RaiseRC(env->pVM, rc);
4346 return (int)u32;
4347 }
4348 remAbort(rc, __FUNCTION__);
4349 return 0xffffffff;
4350}
4351
4352#undef LOG_GROUP
4353#define LOG_GROUP LOG_GROUP_REM
4354
4355
4356/* -+- helpers and misc other interfaces -+- */
4357
4358/**
4359 * Perform the CPUID instruction.
4360 *
4361 * ASMCpuId cannot be invoked from some source files where this is used because of global
4362 * register allocations.
4363 *
4364 * @param env Pointer to the recompiler CPU structure.
4365 * @param uOperator CPUID operation (eax).
4366 * @param pvEAX Where to store eax.
4367 * @param pvEBX Where to store ebx.
4368 * @param pvECX Where to store ecx.
4369 * @param pvEDX Where to store edx.
4370 */
4371void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4372{
4373 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4374}
4375
4376
4377#if 0 /* not used */
4378/**
4379 * Interface for qemu hardware to report back fatal errors.
4380 */
4381void hw_error(const char *pszFormat, ...)
4382{
4383 /*
4384 * Bitch about it.
4385 */
4386 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4387 * this in my Odin32 tree at home! */
4388 va_list args;
4389 va_start(args, pszFormat);
4390 RTLogPrintf("fatal error in virtual hardware:");
4391 RTLogPrintfV(pszFormat, args);
4392 va_end(args);
4393 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4394
4395 /*
4396 * If we're in REM context we'll sync back the state before 'jumping' to
4397 * the EMs failure handling.
4398 */
4399 PVM pVM = cpu_single_env->pVM;
4400 if (pVM->rem.s.fInREM)
4401 REMR3StateBack(pVM);
4402 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4403 AssertMsgFailed(("EMR3FatalError returned!\n"));
4404}
4405#endif
4406
4407/**
4408 * Interface for the qemu cpu to report unhandled situation
4409 * raising a fatal VM error.
4410 */
4411void cpu_abort(CPUState *env, const char *pszFormat, ...)
4412{
4413 va_list va;
4414 PVM pVM;
4415 PVMCPU pVCpu;
4416 char szMsg[256];
4417
4418 /*
4419 * Bitch about it.
4420 */
4421 RTLogFlags(NULL, "nodisabled nobuffered");
4422 RTLogFlush(NULL);
4423
4424 va_start(va, pszFormat);
4425#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4426 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4427 unsigned cArgs = 0;
4428 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4429 const char *psz = strchr(pszFormat, '%');
4430 while (psz && cArgs < 6)
4431 {
4432 auArgs[cArgs++] = va_arg(va, uintptr_t);
4433 psz = strchr(psz + 1, '%');
4434 }
4435 switch (cArgs)
4436 {
4437 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4438 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4439 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4440 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4441 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4442 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4443 default:
4444 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4445 }
4446#else
4447 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4448#endif
4449 va_end(va);
4450
4451 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4452 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4453
4454 /*
4455 * If we're in REM context we'll sync back the state before 'jumping' to
4456 * the EMs failure handling.
4457 */
4458 pVM = cpu_single_env->pVM;
4459 pVCpu = cpu_single_env->pVCpu;
4460 Assert(pVCpu);
4461
4462 if (pVM->rem.s.fInREM)
4463 REMR3StateBack(pVM, pVCpu);
4464 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4465 AssertMsgFailed(("EMR3FatalError returned!\n"));
4466}
4467
4468
4469/**
4470 * Aborts the VM.
4471 *
4472 * @param rc VBox error code.
4473 * @param pszTip Hint about why/when this happend.
4474 */
4475void remAbort(int rc, const char *pszTip)
4476{
4477 PVM pVM;
4478 PVMCPU pVCpu;
4479
4480 /*
4481 * Bitch about it.
4482 */
4483 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4484 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4485
4486 /*
4487 * Jump back to where we entered the recompiler.
4488 */
4489 pVM = cpu_single_env->pVM;
4490 pVCpu = cpu_single_env->pVCpu;
4491 Assert(pVCpu);
4492
4493 if (pVM->rem.s.fInREM)
4494 REMR3StateBack(pVM, pVCpu);
4495
4496 EMR3FatalError(pVCpu, rc);
4497 AssertMsgFailed(("EMR3FatalError returned!\n"));
4498}
4499
4500
4501/**
4502 * Dumps a linux system call.
4503 * @param pVCpu VMCPU handle.
4504 */
4505void remR3DumpLnxSyscall(PVMCPU pVCpu)
4506{
4507 static const char *apsz[] =
4508 {
4509 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4510 "sys_exit",
4511 "sys_fork",
4512 "sys_read",
4513 "sys_write",
4514 "sys_open", /* 5 */
4515 "sys_close",
4516 "sys_waitpid",
4517 "sys_creat",
4518 "sys_link",
4519 "sys_unlink", /* 10 */
4520 "sys_execve",
4521 "sys_chdir",
4522 "sys_time",
4523 "sys_mknod",
4524 "sys_chmod", /* 15 */
4525 "sys_lchown16",
4526 "sys_ni_syscall", /* old break syscall holder */
4527 "sys_stat",
4528 "sys_lseek",
4529 "sys_getpid", /* 20 */
4530 "sys_mount",
4531 "sys_oldumount",
4532 "sys_setuid16",
4533 "sys_getuid16",
4534 "sys_stime", /* 25 */
4535 "sys_ptrace",
4536 "sys_alarm",
4537 "sys_fstat",
4538 "sys_pause",
4539 "sys_utime", /* 30 */
4540 "sys_ni_syscall", /* old stty syscall holder */
4541 "sys_ni_syscall", /* old gtty syscall holder */
4542 "sys_access",
4543 "sys_nice",
4544 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4545 "sys_sync",
4546 "sys_kill",
4547 "sys_rename",
4548 "sys_mkdir",
4549 "sys_rmdir", /* 40 */
4550 "sys_dup",
4551 "sys_pipe",
4552 "sys_times",
4553 "sys_ni_syscall", /* old prof syscall holder */
4554 "sys_brk", /* 45 */
4555 "sys_setgid16",
4556 "sys_getgid16",
4557 "sys_signal",
4558 "sys_geteuid16",
4559 "sys_getegid16", /* 50 */
4560 "sys_acct",
4561 "sys_umount", /* recycled never used phys() */
4562 "sys_ni_syscall", /* old lock syscall holder */
4563 "sys_ioctl",
4564 "sys_fcntl", /* 55 */
4565 "sys_ni_syscall", /* old mpx syscall holder */
4566 "sys_setpgid",
4567 "sys_ni_syscall", /* old ulimit syscall holder */
4568 "sys_olduname",
4569 "sys_umask", /* 60 */
4570 "sys_chroot",
4571 "sys_ustat",
4572 "sys_dup2",
4573 "sys_getppid",
4574 "sys_getpgrp", /* 65 */
4575 "sys_setsid",
4576 "sys_sigaction",
4577 "sys_sgetmask",
4578 "sys_ssetmask",
4579 "sys_setreuid16", /* 70 */
4580 "sys_setregid16",
4581 "sys_sigsuspend",
4582 "sys_sigpending",
4583 "sys_sethostname",
4584 "sys_setrlimit", /* 75 */
4585 "sys_old_getrlimit",
4586 "sys_getrusage",
4587 "sys_gettimeofday",
4588 "sys_settimeofday",
4589 "sys_getgroups16", /* 80 */
4590 "sys_setgroups16",
4591 "old_select",
4592 "sys_symlink",
4593 "sys_lstat",
4594 "sys_readlink", /* 85 */
4595 "sys_uselib",
4596 "sys_swapon",
4597 "sys_reboot",
4598 "old_readdir",
4599 "old_mmap", /* 90 */
4600 "sys_munmap",
4601 "sys_truncate",
4602 "sys_ftruncate",
4603 "sys_fchmod",
4604 "sys_fchown16", /* 95 */
4605 "sys_getpriority",
4606 "sys_setpriority",
4607 "sys_ni_syscall", /* old profil syscall holder */
4608 "sys_statfs",
4609 "sys_fstatfs", /* 100 */
4610 "sys_ioperm",
4611 "sys_socketcall",
4612 "sys_syslog",
4613 "sys_setitimer",
4614 "sys_getitimer", /* 105 */
4615 "sys_newstat",
4616 "sys_newlstat",
4617 "sys_newfstat",
4618 "sys_uname",
4619 "sys_iopl", /* 110 */
4620 "sys_vhangup",
4621 "sys_ni_syscall", /* old "idle" system call */
4622 "sys_vm86old",
4623 "sys_wait4",
4624 "sys_swapoff", /* 115 */
4625 "sys_sysinfo",
4626 "sys_ipc",
4627 "sys_fsync",
4628 "sys_sigreturn",
4629 "sys_clone", /* 120 */
4630 "sys_setdomainname",
4631 "sys_newuname",
4632 "sys_modify_ldt",
4633 "sys_adjtimex",
4634 "sys_mprotect", /* 125 */
4635 "sys_sigprocmask",
4636 "sys_ni_syscall", /* old "create_module" */
4637 "sys_init_module",
4638 "sys_delete_module",
4639 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4640 "sys_quotactl",
4641 "sys_getpgid",
4642 "sys_fchdir",
4643 "sys_bdflush",
4644 "sys_sysfs", /* 135 */
4645 "sys_personality",
4646 "sys_ni_syscall", /* reserved for afs_syscall */
4647 "sys_setfsuid16",
4648 "sys_setfsgid16",
4649 "sys_llseek", /* 140 */
4650 "sys_getdents",
4651 "sys_select",
4652 "sys_flock",
4653 "sys_msync",
4654 "sys_readv", /* 145 */
4655 "sys_writev",
4656 "sys_getsid",
4657 "sys_fdatasync",
4658 "sys_sysctl",
4659 "sys_mlock", /* 150 */
4660 "sys_munlock",
4661 "sys_mlockall",
4662 "sys_munlockall",
4663 "sys_sched_setparam",
4664 "sys_sched_getparam", /* 155 */
4665 "sys_sched_setscheduler",
4666 "sys_sched_getscheduler",
4667 "sys_sched_yield",
4668 "sys_sched_get_priority_max",
4669 "sys_sched_get_priority_min", /* 160 */
4670 "sys_sched_rr_get_interval",
4671 "sys_nanosleep",
4672 "sys_mremap",
4673 "sys_setresuid16",
4674 "sys_getresuid16", /* 165 */
4675 "sys_vm86",
4676 "sys_ni_syscall", /* Old sys_query_module */
4677 "sys_poll",
4678 "sys_nfsservctl",
4679 "sys_setresgid16", /* 170 */
4680 "sys_getresgid16",
4681 "sys_prctl",
4682 "sys_rt_sigreturn",
4683 "sys_rt_sigaction",
4684 "sys_rt_sigprocmask", /* 175 */
4685 "sys_rt_sigpending",
4686 "sys_rt_sigtimedwait",
4687 "sys_rt_sigqueueinfo",
4688 "sys_rt_sigsuspend",
4689 "sys_pread64", /* 180 */
4690 "sys_pwrite64",
4691 "sys_chown16",
4692 "sys_getcwd",
4693 "sys_capget",
4694 "sys_capset", /* 185 */
4695 "sys_sigaltstack",
4696 "sys_sendfile",
4697 "sys_ni_syscall", /* reserved for streams1 */
4698 "sys_ni_syscall", /* reserved for streams2 */
4699 "sys_vfork", /* 190 */
4700 "sys_getrlimit",
4701 "sys_mmap2",
4702 "sys_truncate64",
4703 "sys_ftruncate64",
4704 "sys_stat64", /* 195 */
4705 "sys_lstat64",
4706 "sys_fstat64",
4707 "sys_lchown",
4708 "sys_getuid",
4709 "sys_getgid", /* 200 */
4710 "sys_geteuid",
4711 "sys_getegid",
4712 "sys_setreuid",
4713 "sys_setregid",
4714 "sys_getgroups", /* 205 */
4715 "sys_setgroups",
4716 "sys_fchown",
4717 "sys_setresuid",
4718 "sys_getresuid",
4719 "sys_setresgid", /* 210 */
4720 "sys_getresgid",
4721 "sys_chown",
4722 "sys_setuid",
4723 "sys_setgid",
4724 "sys_setfsuid", /* 215 */
4725 "sys_setfsgid",
4726 "sys_pivot_root",
4727 "sys_mincore",
4728 "sys_madvise",
4729 "sys_getdents64", /* 220 */
4730 "sys_fcntl64",
4731 "sys_ni_syscall", /* reserved for TUX */
4732 "sys_ni_syscall",
4733 "sys_gettid",
4734 "sys_readahead", /* 225 */
4735 "sys_setxattr",
4736 "sys_lsetxattr",
4737 "sys_fsetxattr",
4738 "sys_getxattr",
4739 "sys_lgetxattr", /* 230 */
4740 "sys_fgetxattr",
4741 "sys_listxattr",
4742 "sys_llistxattr",
4743 "sys_flistxattr",
4744 "sys_removexattr", /* 235 */
4745 "sys_lremovexattr",
4746 "sys_fremovexattr",
4747 "sys_tkill",
4748 "sys_sendfile64",
4749 "sys_futex", /* 240 */
4750 "sys_sched_setaffinity",
4751 "sys_sched_getaffinity",
4752 "sys_set_thread_area",
4753 "sys_get_thread_area",
4754 "sys_io_setup", /* 245 */
4755 "sys_io_destroy",
4756 "sys_io_getevents",
4757 "sys_io_submit",
4758 "sys_io_cancel",
4759 "sys_fadvise64", /* 250 */
4760 "sys_ni_syscall",
4761 "sys_exit_group",
4762 "sys_lookup_dcookie",
4763 "sys_epoll_create",
4764 "sys_epoll_ctl", /* 255 */
4765 "sys_epoll_wait",
4766 "sys_remap_file_pages",
4767 "sys_set_tid_address",
4768 "sys_timer_create",
4769 "sys_timer_settime", /* 260 */
4770 "sys_timer_gettime",
4771 "sys_timer_getoverrun",
4772 "sys_timer_delete",
4773 "sys_clock_settime",
4774 "sys_clock_gettime", /* 265 */
4775 "sys_clock_getres",
4776 "sys_clock_nanosleep",
4777 "sys_statfs64",
4778 "sys_fstatfs64",
4779 "sys_tgkill", /* 270 */
4780 "sys_utimes",
4781 "sys_fadvise64_64",
4782 "sys_ni_syscall" /* sys_vserver */
4783 };
4784
4785 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
4786 switch (uEAX)
4787 {
4788 default:
4789 if (uEAX < RT_ELEMENTS(apsz))
4790 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4791 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
4792 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
4793 else
4794 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
4795 break;
4796
4797 }
4798}
4799
4800
4801/**
4802 * Dumps an OpenBSD system call.
4803 * @param pVCpu VMCPU handle.
4804 */
4805void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4806{
4807 static const char *apsz[] =
4808 {
4809 "SYS_syscall", //0
4810 "SYS_exit", //1
4811 "SYS_fork", //2
4812 "SYS_read", //3
4813 "SYS_write", //4
4814 "SYS_open", //5
4815 "SYS_close", //6
4816 "SYS_wait4", //7
4817 "SYS_8",
4818 "SYS_link", //9
4819 "SYS_unlink", //10
4820 "SYS_11",
4821 "SYS_chdir", //12
4822 "SYS_fchdir", //13
4823 "SYS_mknod", //14
4824 "SYS_chmod", //15
4825 "SYS_chown", //16
4826 "SYS_break", //17
4827 "SYS_18",
4828 "SYS_19",
4829 "SYS_getpid", //20
4830 "SYS_mount", //21
4831 "SYS_unmount", //22
4832 "SYS_setuid", //23
4833 "SYS_getuid", //24
4834 "SYS_geteuid", //25
4835 "SYS_ptrace", //26
4836 "SYS_recvmsg", //27
4837 "SYS_sendmsg", //28
4838 "SYS_recvfrom", //29
4839 "SYS_accept", //30
4840 "SYS_getpeername", //31
4841 "SYS_getsockname", //32
4842 "SYS_access", //33
4843 "SYS_chflags", //34
4844 "SYS_fchflags", //35
4845 "SYS_sync", //36
4846 "SYS_kill", //37
4847 "SYS_38",
4848 "SYS_getppid", //39
4849 "SYS_40",
4850 "SYS_dup", //41
4851 "SYS_opipe", //42
4852 "SYS_getegid", //43
4853 "SYS_profil", //44
4854 "SYS_ktrace", //45
4855 "SYS_sigaction", //46
4856 "SYS_getgid", //47
4857 "SYS_sigprocmask", //48
4858 "SYS_getlogin", //49
4859 "SYS_setlogin", //50
4860 "SYS_acct", //51
4861 "SYS_sigpending", //52
4862 "SYS_osigaltstack", //53
4863 "SYS_ioctl", //54
4864 "SYS_reboot", //55
4865 "SYS_revoke", //56
4866 "SYS_symlink", //57
4867 "SYS_readlink", //58
4868 "SYS_execve", //59
4869 "SYS_umask", //60
4870 "SYS_chroot", //61
4871 "SYS_62",
4872 "SYS_63",
4873 "SYS_64",
4874 "SYS_65",
4875 "SYS_vfork", //66
4876 "SYS_67",
4877 "SYS_68",
4878 "SYS_sbrk", //69
4879 "SYS_sstk", //70
4880 "SYS_61",
4881 "SYS_vadvise", //72
4882 "SYS_munmap", //73
4883 "SYS_mprotect", //74
4884 "SYS_madvise", //75
4885 "SYS_76",
4886 "SYS_77",
4887 "SYS_mincore", //78
4888 "SYS_getgroups", //79
4889 "SYS_setgroups", //80
4890 "SYS_getpgrp", //81
4891 "SYS_setpgid", //82
4892 "SYS_setitimer", //83
4893 "SYS_84",
4894 "SYS_85",
4895 "SYS_getitimer", //86
4896 "SYS_87",
4897 "SYS_88",
4898 "SYS_89",
4899 "SYS_dup2", //90
4900 "SYS_91",
4901 "SYS_fcntl", //92
4902 "SYS_select", //93
4903 "SYS_94",
4904 "SYS_fsync", //95
4905 "SYS_setpriority", //96
4906 "SYS_socket", //97
4907 "SYS_connect", //98
4908 "SYS_99",
4909 "SYS_getpriority", //100
4910 "SYS_101",
4911 "SYS_102",
4912 "SYS_sigreturn", //103
4913 "SYS_bind", //104
4914 "SYS_setsockopt", //105
4915 "SYS_listen", //106
4916 "SYS_107",
4917 "SYS_108",
4918 "SYS_109",
4919 "SYS_110",
4920 "SYS_sigsuspend", //111
4921 "SYS_112",
4922 "SYS_113",
4923 "SYS_114",
4924 "SYS_115",
4925 "SYS_gettimeofday", //116
4926 "SYS_getrusage", //117
4927 "SYS_getsockopt", //118
4928 "SYS_119",
4929 "SYS_readv", //120
4930 "SYS_writev", //121
4931 "SYS_settimeofday", //122
4932 "SYS_fchown", //123
4933 "SYS_fchmod", //124
4934 "SYS_125",
4935 "SYS_setreuid", //126
4936 "SYS_setregid", //127
4937 "SYS_rename", //128
4938 "SYS_129",
4939 "SYS_130",
4940 "SYS_flock", //131
4941 "SYS_mkfifo", //132
4942 "SYS_sendto", //133
4943 "SYS_shutdown", //134
4944 "SYS_socketpair", //135
4945 "SYS_mkdir", //136
4946 "SYS_rmdir", //137
4947 "SYS_utimes", //138
4948 "SYS_139",
4949 "SYS_adjtime", //140
4950 "SYS_141",
4951 "SYS_142",
4952 "SYS_143",
4953 "SYS_144",
4954 "SYS_145",
4955 "SYS_146",
4956 "SYS_setsid", //147
4957 "SYS_quotactl", //148
4958 "SYS_149",
4959 "SYS_150",
4960 "SYS_151",
4961 "SYS_152",
4962 "SYS_153",
4963 "SYS_154",
4964 "SYS_nfssvc", //155
4965 "SYS_156",
4966 "SYS_157",
4967 "SYS_158",
4968 "SYS_159",
4969 "SYS_160",
4970 "SYS_getfh", //161
4971 "SYS_162",
4972 "SYS_163",
4973 "SYS_164",
4974 "SYS_sysarch", //165
4975 "SYS_166",
4976 "SYS_167",
4977 "SYS_168",
4978 "SYS_169",
4979 "SYS_170",
4980 "SYS_171",
4981 "SYS_172",
4982 "SYS_pread", //173
4983 "SYS_pwrite", //174
4984 "SYS_175",
4985 "SYS_176",
4986 "SYS_177",
4987 "SYS_178",
4988 "SYS_179",
4989 "SYS_180",
4990 "SYS_setgid", //181
4991 "SYS_setegid", //182
4992 "SYS_seteuid", //183
4993 "SYS_lfs_bmapv", //184
4994 "SYS_lfs_markv", //185
4995 "SYS_lfs_segclean", //186
4996 "SYS_lfs_segwait", //187
4997 "SYS_188",
4998 "SYS_189",
4999 "SYS_190",
5000 "SYS_pathconf", //191
5001 "SYS_fpathconf", //192
5002 "SYS_swapctl", //193
5003 "SYS_getrlimit", //194
5004 "SYS_setrlimit", //195
5005 "SYS_getdirentries", //196
5006 "SYS_mmap", //197
5007 "SYS___syscall", //198
5008 "SYS_lseek", //199
5009 "SYS_truncate", //200
5010 "SYS_ftruncate", //201
5011 "SYS___sysctl", //202
5012 "SYS_mlock", //203
5013 "SYS_munlock", //204
5014 "SYS_205",
5015 "SYS_futimes", //206
5016 "SYS_getpgid", //207
5017 "SYS_xfspioctl", //208
5018 "SYS_209",
5019 "SYS_210",
5020 "SYS_211",
5021 "SYS_212",
5022 "SYS_213",
5023 "SYS_214",
5024 "SYS_215",
5025 "SYS_216",
5026 "SYS_217",
5027 "SYS_218",
5028 "SYS_219",
5029 "SYS_220",
5030 "SYS_semget", //221
5031 "SYS_222",
5032 "SYS_223",
5033 "SYS_224",
5034 "SYS_msgget", //225
5035 "SYS_msgsnd", //226
5036 "SYS_msgrcv", //227
5037 "SYS_shmat", //228
5038 "SYS_229",
5039 "SYS_shmdt", //230
5040 "SYS_231",
5041 "SYS_clock_gettime", //232
5042 "SYS_clock_settime", //233
5043 "SYS_clock_getres", //234
5044 "SYS_235",
5045 "SYS_236",
5046 "SYS_237",
5047 "SYS_238",
5048 "SYS_239",
5049 "SYS_nanosleep", //240
5050 "SYS_241",
5051 "SYS_242",
5052 "SYS_243",
5053 "SYS_244",
5054 "SYS_245",
5055 "SYS_246",
5056 "SYS_247",
5057 "SYS_248",
5058 "SYS_249",
5059 "SYS_minherit", //250
5060 "SYS_rfork", //251
5061 "SYS_poll", //252
5062 "SYS_issetugid", //253
5063 "SYS_lchown", //254
5064 "SYS_getsid", //255
5065 "SYS_msync", //256
5066 "SYS_257",
5067 "SYS_258",
5068 "SYS_259",
5069 "SYS_getfsstat", //260
5070 "SYS_statfs", //261
5071 "SYS_fstatfs", //262
5072 "SYS_pipe", //263
5073 "SYS_fhopen", //264
5074 "SYS_265",
5075 "SYS_fhstatfs", //266
5076 "SYS_preadv", //267
5077 "SYS_pwritev", //268
5078 "SYS_kqueue", //269
5079 "SYS_kevent", //270
5080 "SYS_mlockall", //271
5081 "SYS_munlockall", //272
5082 "SYS_getpeereid", //273
5083 "SYS_274",
5084 "SYS_275",
5085 "SYS_276",
5086 "SYS_277",
5087 "SYS_278",
5088 "SYS_279",
5089 "SYS_280",
5090 "SYS_getresuid", //281
5091 "SYS_setresuid", //282
5092 "SYS_getresgid", //283
5093 "SYS_setresgid", //284
5094 "SYS_285",
5095 "SYS_mquery", //286
5096 "SYS_closefrom", //287
5097 "SYS_sigaltstack", //288
5098 "SYS_shmget", //289
5099 "SYS_semop", //290
5100 "SYS_stat", //291
5101 "SYS_fstat", //292
5102 "SYS_lstat", //293
5103 "SYS_fhstat", //294
5104 "SYS___semctl", //295
5105 "SYS_shmctl", //296
5106 "SYS_msgctl", //297
5107 "SYS_MAXSYSCALL", //298
5108 //299
5109 //300
5110 };
5111 uint32_t uEAX;
5112 if (!LogIsEnabled())
5113 return;
5114 uEAX = CPUMGetGuestEAX(pVCpu);
5115 switch (uEAX)
5116 {
5117 default:
5118 if (uEAX < RT_ELEMENTS(apsz))
5119 {
5120 uint32_t au32Args[8] = {0};
5121 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5122 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5123 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5124 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5125 }
5126 else
5127 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5128 break;
5129 }
5130}
5131
5132
5133#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5134/**
5135 * The Dll main entry point (stub).
5136 */
5137bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5138{
5139 return true;
5140}
5141
/**
 * Minimal memcpy replacement for the no-CRT build (IPRT_NO_CRT).
 *
 * Copies @a size bytes from @a src to @a dst, one byte at a time.
 * As with the standard memcpy, the regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Pointer to the destination buffer.
 * @param   src     Pointer to the source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    /* The source is const void *; keep the qualifier instead of silently
       discarding it like the old 'uint8_t *pbSrc = src;' did. */
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5149
5150#endif
5151
/**
 * QEMU cpu_smm_update hook (intentionally empty).
 *
 * NOTE(review): presumably invoked by the QEMU core when the CPU's
 * System Management Mode state changes; the VBox recompiler supplies
 * this empty implementation, so SMM transitions are ignored here —
 * confirm against the QEMU core callers if this ever needs to do work.
 *
 * @param   env     The CPU state (unused).
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette