VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 21052

Last change on this file since 21052 was 20867, checked in by vboxsync, 15 years ago

REMR3ReplayHandlerNotifications: Fixed list reversal regression from r48863.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 165.2 KB
Line 
1/* $Id: VBoxRecompiler.c 20867 2009-06-24 00:09:39Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 *
 * Performs a single struct assignment of an X86FPUMMX (80-bit FPU/MMX
 * register image) instead of a byte-wise memcpy; the compiler can lower
 * this to a couple of moves. Both pDst and pSrc must point to at least
 * sizeof(X86FPUMMX) bytes.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
/** @todo Move stats to REM::s some rainy day we have nothing to do. */
117#ifdef VBOX_WITH_STATISTICS
118static STAMPROFILEADV gStatExecuteSingleInstr;
119static STAMPROFILEADV gStatCompilationQEmu;
120static STAMPROFILEADV gStatRunCodeQEmu;
121static STAMPROFILEADV gStatTotalTimeQEmu;
122static STAMPROFILEADV gStatTimers;
123static STAMPROFILEADV gStatTBLookup;
124static STAMPROFILEADV gStatIRQ;
125static STAMPROFILEADV gStatRawCheck;
126static STAMPROFILEADV gStatMemRead;
127static STAMPROFILEADV gStatMemWrite;
128static STAMPROFILE gStatGCPhys2HCVirt;
129static STAMPROFILE gStatHCVirt2GCPhys;
130static STAMCOUNTER gStatCpuGetTSC;
131static STAMCOUNTER gStatRefuseTFInhibit;
132static STAMCOUNTER gStatRefuseVM86;
133static STAMCOUNTER gStatRefusePaging;
134static STAMCOUNTER gStatRefusePAE;
135static STAMCOUNTER gStatRefuseIOPLNot0;
136static STAMCOUNTER gStatRefuseIF0;
137static STAMCOUNTER gStatRefuseCode16;
138static STAMCOUNTER gStatRefuseWP0;
139static STAMCOUNTER gStatRefuseRing1or2;
140static STAMCOUNTER gStatRefuseCanExecute;
141static STAMCOUNTER gStatREMGDTChange;
142static STAMCOUNTER gStatREMIDTChange;
143static STAMCOUNTER gStatREMLDTRChange;
144static STAMCOUNTER gStatREMTRChange;
145static STAMCOUNTER gStatSelOutOfSync[6];
146static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
147static STAMCOUNTER gStatFlushTBs;
148#endif
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
/*
 * Global stuff.
 */

/** MMIO read callbacks.
 * Indexed by access-size shift (0=U8, 1=U16, 2=U32), matching the
 * remR3MMIOReadU8/U16/U32 ordering; registered with the recompiler via
 * cpu_register_io_memory() in REMR3Init(). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks. Same size-shift indexing as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (for pages covered by PGM access handlers).
 * Same size-shift indexing as the MMIO tables. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks. Same size-shift indexing as the MMIO tables. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
/** Handler for the '.remstep' command; registered with DBGC in REMR3Init(). */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory,         fFlags, pszName,  pszDescription */
    {  0,         ~0,        DBGCVAR_CAT_NUMBER,  0,      "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once (guarded by a static flag) in REMR3Init(). */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
222
223/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
224uint8_t *code_gen_prologue;
225
226
227/*******************************************************************************
228* Internal Functions *
229*******************************************************************************/
230void remAbort(int rc, const char *pszTip);
231extern int testmath(void);
232
233/* Put them here to avoid unused variable warning. */
234AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
235#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
236//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
237/* Why did this have to be identical?? */
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#else
240AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
241#endif
242
243
/**
 * Initializes the REM.
 *
 * Sets up the recompiled-execution environment: sanity checks the REM
 * padding/Env sizes, initializes the QEMU CPU state, registers the
 * MMIO/handler memory types, the saved-state unit, debugger commands,
 * statistics, and the handler-notification free list.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    /* (Balanced by the ASMAtomicDecU32 further down, after the memory types
     *  have been registered.) */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue code must sit in the lower 4GB so generated code can reach it
     * with 32-bit jumps (see the code_gen_prologue declaration). */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest's CPUID feature bits (std leaf 1 and ext leaf
     * 0x80000001) into the recompiler CPU state. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        /* NOTE(review): this inner 'rc' shadows the outer one — registration
         * failure is deliberately non-fatal and does not affect the return. */
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr", STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",     STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",     STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",     STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",      STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",    STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",         STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",    STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",     STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",        STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit",  STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",      STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",    STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",       STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0",  STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",       STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",    STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",       STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2",  STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",          STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",   STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",      STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",   STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",        STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_REG(pVM, &tb_flush_count,            STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,  STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,           STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Init the handler notification lists.
     * All entries start on the free list, linked through idxNext; the
     * pending list is empty (UINT32_MAX terminates/means "none").
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX;     /* the last record. */

    return rc;
}
440
441
442/**
443 * Finalizes the REM initialization.
444 *
445 * This is called after all components, devices and drivers has
446 * been initialized. Its main purpose it to finish the RAM related
447 * initialization.
448 *
449 * @returns VBox status code.
450 *
451 * @param pVM The VM handle.
452 */
453REMR3DECL(int) REMR3InitFinalize(PVM pVM)
454{
455 int rc;
456
457 /*
458 * Ram size & dirty bit map.
459 */
460 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
461 pVM->rem.s.fGCPhysLastRamFixed = true;
462#ifdef RT_STRICT
463 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
464#else
465 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
466#endif
467 return rc;
468}
469
470
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map has one byte per guest page. In guarded mode the bitmap is
 * placed so that its end coincides with the start of an inaccessible guard
 * region, making any overrun of the map fault immediately.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* GCPhysLastRam is the last RAM address, so +1 gives the size; the
     * assertion catches wrap-around (GCPhysLastRam == ~0). */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    phys_ram_dirty_size = cb >> PAGE_SHIFT;     /* one byte per page */
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         * cbBitmapAligned = bitmap size rounded up to a page; cbBitmapFull adds
         * enough extra (at least _64K) to serve as the guard tail.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything past the aligned bitmap inaccessible (the guard). */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map forward so its last byte sits immediately before the
         * guard region — overruns fault right away. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it: all pages initially dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
525
526
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently only deregisters the statistics registered in REMR3Init();
 * memory allocated there is left for the VM teardown to reclaim.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics. (Mirrors the STAM_REG list in REMR3Init.)
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
597
598
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * cIgnoreAll is raised around cpu_reset() so that any notifications it
     * triggers are ignored; the asserts verify the count is balanced.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
625
626
/**
 * Execute state save operation.
 *
 * Layout (all uint32_t, must match remR3Load): hflags, ~0 separator,
 * raw-ring-0 flag, pending interrupt, ~0 terminator.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    /* Only the final put's status is returned; SSM latches earlier errors
     * internally, so this reflects the whole sequence. */
    return SSMR3PutU32(pSSM, ~0);       /* terminator */
}
653
654
/**
 * Execute state load operation.
 *
 * Counterpart of remR3Save(); additionally understands the 1.6-era layout
 * (REM_SAVED_STATE_VERSION_VER1_6) which stored a redundant CPU state copy
 * and the invalidated-pages list.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    unsigned i;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     * Balanced by the ASMAtomicDecU32 near the end of this function.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     * (Re-derived from the current host/guest config, not from the stream.)
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignornable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     * NOTE(review): the loop variable pVCpu shadows the one above; harmless
     * but easy to trip over when editing.
     */
    for (i = 0; i < pVM->cCPUs; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
783
784
785
786#undef LOG_GROUP
787#define LOG_GROUP LOG_GROUP_REM_RUN
788
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * interrupt_request is saved here and restored at the bottom.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (fBp records whether one was removed so it can be re-inserted below.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Resume+suspend nudges the TM clock forward after the step. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the remaining QEMU exit codes to VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status was stashed in rem.s.rc; consume and reset it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
873
874
875/**
876 * Set a breakpoint using the REM facilities.
877 *
878 * @returns VBox status code.
879 * @param pVM The VM handle.
880 * @param Address The breakpoint address.
881 * @thread The emulation thread.
882 */
883REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
884{
885 VM_ASSERT_EMT(pVM);
886 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
887 {
888 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
889 return VINF_SUCCESS;
890 }
891 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
892 return VERR_REM_NO_MORE_BP_SLOTS;
893}
894
895
896/**
897 * Clears a breakpoint set by REMR3BreakpointSet().
898 *
899 * @returns VBox status code.
900 * @param pVM The VM handle.
901 * @param Address The breakpoint address.
902 * @thread The emulation thread.
903 */
904REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
905{
906 VM_ASSERT_EMT(pVM);
907 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
908 {
909 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
910 return VINF_SUCCESS;
911 }
912 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
913 return VERR_REM_BP_NOT_FOUND;
914}
915
916
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest (REMR3State before, REMR3StateBack after).
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     * (fFlushTBs is restored right after the sync regardless of the outcome.)
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save the pending interrupt requests and replace them with the single-instruction flag. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there happened to be a breakpoint at this address as well, we
             * cannot reliably tell the two apart - the loop below checks for that.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state (restore pending interrupt requests, sync REM -> VM).
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1065
1066
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    /* Run the recompiler loop and translate its exit code to a VBox status code. */
    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the armed breakpoints for the current PC. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1195
1196
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only the fields that function inspects are filled in.
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring 3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring 0 (and 1/2, which are refused below) code path. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            /* PATM patch code must always run in raw mode. */
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1431
1432
1433/**
1434 * Fetches a code byte.
1435 *
1436 * @returns Success indicator (bool) for ease of use.
1437 * @param env The CPU environment structure.
1438 * @param GCPtrInstr Where to fetch code.
1439 * @param pu8Byte Where to store the byte on success
1440 */
1441bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1442{
1443 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1444 if (RT_SUCCESS(rc))
1445 return true;
1446 return false;
1447}
1448
1449
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env     Pointer to cpu environment.
 * @param   GCPtr   The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     * (PGM reads CR0/CR3/CR4 from the CPUM context, so it must be current.)
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync (V86 interrupt redirection bitmap). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* On failure, request a full CR3 resync instead of failing the invalidation. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1498
1499
1500#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address to an R3 pointer for the recompiler TLB.
 * The two low bits of the returned pointer are used as flags: the value 1
 * signals a failed translation, bit 1 (|2) signals "catch write" accesses.
 *
 * NOTE(review): the fWritable parameter is currently ignored - the call below
 * always passes true. Confirm this is intentional before relying on it.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert( rc == VINF_SUCCESS
         || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
         || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
         || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1521#endif /* REM_PHYS_ADDR_IN_TLB */
1522
1523
/**
 * Called from tlb_protect_code in order to write monitor a code page.
 *
 * Hands the page to CSAM for monitoring, but only for paged supervisor-mode
 * non-V86 code and only when hardware virtualization isn't active.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)        /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                        /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1542
1543
/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * Mirror image of remR3ProtectCode: removes the CSAM monitoring under the
 * same conditions it was established.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
        &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)        /* ignore during single instruction execution */
        &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
        &&  !(env->eflags & VM_MASK)                        /* no V86 mode */
        &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1562
1563
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so every flush is global.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync (V86 interrupt redirection bitmap). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1608
1609
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * Propagates the new paging mode to PGM via PGMChangeMode().
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A CR4.VME change requires a TSS resync (V86 interrupt redirection bitmap). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    /* EFER doesn't exist when the recompiler is built without long mode support. */
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* An EM status can be forwarded to the caller of cpu_exec. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1660
1661
1662/**
1663 * Called from compiled code to run dma.
1664 *
1665 * @param env Pointer to the CPU environment.
1666 */
1667void remR3DmaRun(CPUState *env)
1668{
1669 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1670 PDMR3DmaRun(env->pVM);
1671 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1672}
1673
1674
1675/**
1676 * Called from compiled code to schedule pending timers in VMM
1677 *
1678 * @param env Pointer to the CPU environment.
1679 */
1680void remR3TimersRun(CPUState *env)
1681{
1682 LogFlow(("remR3TimersRun:\n"));
1683 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1684 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1685 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1686 TMR3TimerQueuesDo(env->pVM);
1687 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1688 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1689}
1690
1691
/**
 * Record trap occurrence.
 *
 * Keeps per-trap statistics (when enabled) and tracks repeated delivery of the
 * same exception at the same EIP/CR2 so a guest stuck in a trap loop can be
 * detected and aborted with VERR_REM_TOO_MANY_TRAPS.
 *
 * @returns VBox status code
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily-registered STAM counters, one per trap vector below 255. */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            /* First hit of this vector: register its counter with STAM. */
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* Only track CPU exceptions (vector < 0x20) in protected, non-V86 mode. */
    if(     uTrap < 0x20
        &&  (env->cr[0] & X86_CR0_PE)
        &&  !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same trap repeating at the same spot more than 512 times => give up. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap or location restarts the repetition counter. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    else
    {
        /* Not an exception we loop-track; just record it with a zeroed counter. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP  = env->eip;
        pVM->rem.s.uPendingExcptCR2  = env->cr[2];
    }
    return VINF_SUCCESS;
}
1751
1752
1753/*
1754 * Clear current active trap
1755 *
1756 * @param pVM VM Handle.
1757 */
1758void remR3TrapClear(PVM pVM)
1759{
1760 pVM->rem.s.cPendingExceptions = 0;
1761 pVM->rem.s.uPendingException = 0;
1762 pVM->rem.s.uPendingExcptEIP = 0;
1763 pVM->rem.s.uPendingExcptCR2 = 0;
1764}
1765
1766
1767/*
1768 * Record previous call instruction addresses
1769 *
1770 * @param env Pointer to the CPU environment.
1771 */
1772void remR3RecordCall(CPUState *env)
1773{
1774 CSAMR3RecordCallAddress(env->pVM, env->eip);
1775}
1776
1777
1778/**
1779 * Syncs the internal REM state with the VM.
1780 *
1781 * This must be called before REMR3Run() is invoked whenever when the REM
1782 * state is not up to date. Calling it several times in a row is not
1783 * permitted.
1784 *
1785 * @returns VBox status code.
1786 *
1787 * @param pVM VM Handle.
1788 * @param pVCpu VMCPU Handle.
1789 *
1790 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1791 * no do this since the majority of the callers don't want any unnecessary of events
1792 * pending that would immediatly interrupt execution.
1793 */
1794REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1795{
1796 register const CPUMCTX *pCtx;
1797 register unsigned fFlags;
1798 bool fHiddenSelRegsValid;
1799 unsigned i;
1800 TRPMEVENT enmType;
1801 uint8_t u8TrapNo;
1802 int rc;
1803
1804 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1805 Log2(("REMR3State:\n"));
1806
1807 pVM->rem.s.Env.pVCpu = pVCpu;
1808 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1809 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1810
1811 Assert(!pVM->rem.s.fInREM);
1812 pVM->rem.s.fInStateSync = true;
1813
1814 /*
1815 * If we have to flush TBs, do that immediately.
1816 */
1817 if (pVM->rem.s.fFlushTBs)
1818 {
1819 STAM_COUNTER_INC(&gStatFlushTBs);
1820 tb_flush(&pVM->rem.s.Env);
1821 pVM->rem.s.fFlushTBs = false;
1822 }
1823
1824 /*
1825 * Copy the registers which require no special handling.
1826 */
1827#ifdef TARGET_X86_64
1828 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1829 Assert(R_EAX == 0);
1830 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1831 Assert(R_ECX == 1);
1832 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1833 Assert(R_EDX == 2);
1834 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1835 Assert(R_EBX == 3);
1836 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1837 Assert(R_ESP == 4);
1838 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1839 Assert(R_EBP == 5);
1840 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1841 Assert(R_ESI == 6);
1842 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1843 Assert(R_EDI == 7);
1844 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1845 pVM->rem.s.Env.regs[8] = pCtx->r8;
1846 pVM->rem.s.Env.regs[9] = pCtx->r9;
1847 pVM->rem.s.Env.regs[10] = pCtx->r10;
1848 pVM->rem.s.Env.regs[11] = pCtx->r11;
1849 pVM->rem.s.Env.regs[12] = pCtx->r12;
1850 pVM->rem.s.Env.regs[13] = pCtx->r13;
1851 pVM->rem.s.Env.regs[14] = pCtx->r14;
1852 pVM->rem.s.Env.regs[15] = pCtx->r15;
1853
1854 pVM->rem.s.Env.eip = pCtx->rip;
1855
1856 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1857#else
1858 Assert(R_EAX == 0);
1859 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1860 Assert(R_ECX == 1);
1861 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1862 Assert(R_EDX == 2);
1863 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1864 Assert(R_EBX == 3);
1865 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1866 Assert(R_ESP == 4);
1867 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1868 Assert(R_EBP == 5);
1869 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1870 Assert(R_ESI == 6);
1871 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1872 Assert(R_EDI == 7);
1873 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1874 pVM->rem.s.Env.eip = pCtx->eip;
1875
1876 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1877#endif
1878
1879 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1880
1881 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1882 for (i=0;i<8;i++)
1883 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1884
1885 /*
1886 * Clear the halted hidden flag (the interrupt waking up the CPU can
1887 * have been dispatched in raw mode).
1888 */
1889 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1890
1891 /*
1892 * Replay invlpg?
1893 */
1894 if (pVM->rem.s.cInvalidatedPages)
1895 {
1896 RTUINT i;
1897
1898 pVM->rem.s.fIgnoreInvlPg = true;
1899 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1900 {
1901 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1902 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1903 }
1904 pVM->rem.s.fIgnoreInvlPg = false;
1905 pVM->rem.s.cInvalidatedPages = 0;
1906 }
1907
1908 /* Replay notification changes. */
1909 REMR3ReplayHandlerNotifications(pVM);
1910
1911 /* Update MSRs; before CRx registers! */
1912 pVM->rem.s.Env.efer = pCtx->msrEFER;
1913 pVM->rem.s.Env.star = pCtx->msrSTAR;
1914 pVM->rem.s.Env.pat = pCtx->msrPAT;
1915#ifdef TARGET_X86_64
1916 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1917 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1918 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1919 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1920
1921 /* Update the internal long mode activate flag according to the new EFER value. */
1922 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1923 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1924 else
1925 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1926#endif
1927
1928 /*
1929 * Registers which are rarely changed and require special handling / order when changed.
1930 */
1931 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1932 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1933 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1934 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1935 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1936 {
1937 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1938 {
1939 pVM->rem.s.fIgnoreCR3Load = true;
1940 tlb_flush(&pVM->rem.s.Env, true);
1941 pVM->rem.s.fIgnoreCR3Load = false;
1942 }
1943
1944 /* CR4 before CR0! */
1945 if (fFlags & CPUM_CHANGED_CR4)
1946 {
1947 pVM->rem.s.fIgnoreCR3Load = true;
1948 pVM->rem.s.fIgnoreCpuMode = true;
1949 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1950 pVM->rem.s.fIgnoreCpuMode = false;
1951 pVM->rem.s.fIgnoreCR3Load = false;
1952 }
1953
1954 if (fFlags & CPUM_CHANGED_CR0)
1955 {
1956 pVM->rem.s.fIgnoreCR3Load = true;
1957 pVM->rem.s.fIgnoreCpuMode = true;
1958 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1959 pVM->rem.s.fIgnoreCpuMode = false;
1960 pVM->rem.s.fIgnoreCR3Load = false;
1961 }
1962
1963 if (fFlags & CPUM_CHANGED_CR3)
1964 {
1965 pVM->rem.s.fIgnoreCR3Load = true;
1966 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1967 pVM->rem.s.fIgnoreCR3Load = false;
1968 }
1969
1970 if (fFlags & CPUM_CHANGED_GDTR)
1971 {
1972 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1973 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1974 }
1975
1976 if (fFlags & CPUM_CHANGED_IDTR)
1977 {
1978 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1979 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1980 }
1981
1982 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1983 {
1984 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1985 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1986 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1987 }
1988
1989 if (fFlags & CPUM_CHANGED_LDTR)
1990 {
1991 if (fHiddenSelRegsValid)
1992 {
1993 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1994 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1995 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1996 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1997 }
1998 else
1999 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2000 }
2001
2002 if (fFlags & CPUM_CHANGED_CPUID)
2003 {
2004 uint32_t u32Dummy;
2005
2006 /*
2007 * Get the CPUID features.
2008 */
2009 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2010 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2011 }
2012
2013 /* Sync FPU state after CR4, CPUID and EFER (!). */
2014 if (fFlags & CPUM_CHANGED_FPU_REM)
2015 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2016 }
2017
2018 /*
2019 * Sync TR unconditionally to make life simpler.
2020 */
2021 pVM->rem.s.Env.tr.selector = pCtx->tr;
2022 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2023 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2024 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2025 /* Note! do_interrupt will fault if the busy flag is still set... */
2026 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2027
2028 /*
2029 * Update selector registers.
2030 * This must be done *after* we've synced gdt, ldt and crX registers
2031 * since we're reading the GDT/LDT om sync_seg. This will happen with
2032 * saved state which takes a quick dip into rawmode for instance.
2033 */
2034 /*
2035 * Stack; Note first check this one as the CPL might have changed. The
2036 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2037 */
2038
2039 if (fHiddenSelRegsValid)
2040 {
2041 /* The hidden selector registers are valid in the CPU context. */
2042 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2043
2044 /* Set current CPL */
2045 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2046
2047 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2048 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2049 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2050 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2051 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2052 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2053 }
2054 else
2055 {
2056 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2057 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2058 {
2059 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2060
2061 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2062 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2063#ifdef VBOX_WITH_STATISTICS
2064 if (pVM->rem.s.Env.segs[R_SS].newselector)
2065 {
2066 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2067 }
2068#endif
2069 }
2070 else
2071 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2072
2073 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2074 {
2075 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2076 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2077#ifdef VBOX_WITH_STATISTICS
2078 if (pVM->rem.s.Env.segs[R_ES].newselector)
2079 {
2080 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2081 }
2082#endif
2083 }
2084 else
2085 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2086
2087 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2088 {
2089 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2090 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2091#ifdef VBOX_WITH_STATISTICS
2092 if (pVM->rem.s.Env.segs[R_CS].newselector)
2093 {
2094 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2095 }
2096#endif
2097 }
2098 else
2099 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2100
2101 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2102 {
2103 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2104 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2105#ifdef VBOX_WITH_STATISTICS
2106 if (pVM->rem.s.Env.segs[R_DS].newselector)
2107 {
2108 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2109 }
2110#endif
2111 }
2112 else
2113 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2114
2115 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2116 * be the same but not the base/limit. */
2117 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2118 {
2119 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2120 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2121#ifdef VBOX_WITH_STATISTICS
2122 if (pVM->rem.s.Env.segs[R_FS].newselector)
2123 {
2124 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2125 }
2126#endif
2127 }
2128 else
2129 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2130
2131 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2132 {
2133 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2134 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2135#ifdef VBOX_WITH_STATISTICS
2136 if (pVM->rem.s.Env.segs[R_GS].newselector)
2137 {
2138 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2139 }
2140#endif
2141 }
2142 else
2143 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2144 }
2145
2146 /*
2147 * Check for traps.
2148 */
2149 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2150 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2151 if (RT_SUCCESS(rc))
2152 {
2153#ifdef DEBUG
2154 if (u8TrapNo == 0x80)
2155 {
2156 remR3DumpLnxSyscall(pVCpu);
2157 remR3DumpOBsdSyscall(pVCpu);
2158 }
2159#endif
2160
2161 pVM->rem.s.Env.exception_index = u8TrapNo;
2162 if (enmType != TRPM_SOFTWARE_INT)
2163 {
2164 pVM->rem.s.Env.exception_is_int = 0;
2165 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2166 }
2167 else
2168 {
2169 /*
2170 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2171 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2172 * for int03 and into.
2173 */
2174 pVM->rem.s.Env.exception_is_int = 1;
2175 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2176 /* int 3 may be generated by one-byte 0xcc */
2177 if (u8TrapNo == 3)
2178 {
2179 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2180 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2181 }
2182 /* int 4 may be generated by one-byte 0xce */
2183 else if (u8TrapNo == 4)
2184 {
2185 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2186 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2187 }
2188 }
2189
2190 /* get error code and cr2 if needed. */
2191 switch (u8TrapNo)
2192 {
2193 case 0x0e:
2194 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2195 /* fallthru */
2196 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2197 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2198 break;
2199
2200 case 0x11: case 0x08:
2201 default:
2202 pVM->rem.s.Env.error_code = 0;
2203 break;
2204 }
2205
2206 /*
2207 * We can now reset the active trap since the recompiler is gonna have a go at it.
2208 */
2209 rc = TRPMResetTrap(pVCpu);
2210 AssertRC(rc);
2211 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2212 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2213 }
2214
2215 /*
2216 * Clear old interrupt request flags; Check for pending hardware interrupts.
2217 * (See @remark for why we don't check for other FFs.)
2218 */
2219 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2220 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2221 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2222 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2223
2224 /*
2225 * We're now in REM mode.
2226 */
2227 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2228 pVM->rem.s.fInREM = true;
2229 pVM->rem.s.fInStateSync = false;
2230 pVM->rem.s.cCanExecuteRaw = 0;
2231 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2232 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2233 return VINF_SUCCESS;
2234}
2235
2236
2237/**
2238 * Syncs back changes in the REM state to the the VM state.
2239 *
2240 * This must be called after invoking REMR3Run().
2241 * Calling it several times in a row is not permitted.
2242 *
2243 * @returns VBox status code.
2244 *
2245 * @param pVM VM Handle.
2246 * @param pVCpu VMCPU Handle.
2247 */
2248REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2249{
2250 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2251 Assert(pCtx);
2252 unsigned i;
2253
2254 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2255 Log2(("REMR3StateBack:\n"));
2256 Assert(pVM->rem.s.fInREM);
2257
2258 /*
2259 * Copy back the registers.
2260 * This is done in the order they are declared in the CPUMCTX structure.
2261 */
2262
2263 /** @todo FOP */
2264 /** @todo FPUIP */
2265 /** @todo CS */
2266 /** @todo FPUDP */
2267 /** @todo DS */
2268
2269 /** @todo check if FPU/XMM was actually used in the recompiler */
2270 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2271//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2272
2273#ifdef TARGET_X86_64
2274 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2275 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2276 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2277 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2278 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2279 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2280 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2281 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2282 pCtx->r8 = pVM->rem.s.Env.regs[8];
2283 pCtx->r9 = pVM->rem.s.Env.regs[9];
2284 pCtx->r10 = pVM->rem.s.Env.regs[10];
2285 pCtx->r11 = pVM->rem.s.Env.regs[11];
2286 pCtx->r12 = pVM->rem.s.Env.regs[12];
2287 pCtx->r13 = pVM->rem.s.Env.regs[13];
2288 pCtx->r14 = pVM->rem.s.Env.regs[14];
2289 pCtx->r15 = pVM->rem.s.Env.regs[15];
2290
2291 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2292
2293#else
2294 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2295 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2296 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2297 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2298 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2299 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2300 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2301
2302 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2303#endif
2304
2305 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2306
2307#ifdef VBOX_WITH_STATISTICS
2308 if (pVM->rem.s.Env.segs[R_SS].newselector)
2309 {
2310 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2311 }
2312 if (pVM->rem.s.Env.segs[R_GS].newselector)
2313 {
2314 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2315 }
2316 if (pVM->rem.s.Env.segs[R_FS].newselector)
2317 {
2318 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2319 }
2320 if (pVM->rem.s.Env.segs[R_ES].newselector)
2321 {
2322 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2323 }
2324 if (pVM->rem.s.Env.segs[R_DS].newselector)
2325 {
2326 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2327 }
2328 if (pVM->rem.s.Env.segs[R_CS].newselector)
2329 {
2330 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2331 }
2332#endif
2333 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2334 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2335 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2336 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2337 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2338
2339#ifdef TARGET_X86_64
2340 pCtx->rip = pVM->rem.s.Env.eip;
2341 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2342#else
2343 pCtx->eip = pVM->rem.s.Env.eip;
2344 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2345#endif
2346
2347 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2348 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2349 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2350 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2351 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2352 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2353
2354 for (i = 0; i < 8; i++)
2355 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2356
2357 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2358 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2359 {
2360 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2361 STAM_COUNTER_INC(&gStatREMGDTChange);
2362 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2363 }
2364
2365 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2366 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2367 {
2368 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2369 STAM_COUNTER_INC(&gStatREMIDTChange);
2370 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2371 }
2372
2373 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2374 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2375 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2376 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2377 {
2378 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2379 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2380 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2381 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2382 STAM_COUNTER_INC(&gStatREMLDTRChange);
2383 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2384 }
2385
2386 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2387 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2388 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2389 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2390 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2391 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2392 : 0) )
2393 {
2394 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2395 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2396 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2397 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2398 pCtx->tr = pVM->rem.s.Env.tr.selector;
2399 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2400 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2401 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2402 if (pCtx->trHid.Attr.u)
2403 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2404 STAM_COUNTER_INC(&gStatREMTRChange);
2405 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2406 }
2407
2408 /** @todo These values could still be out of sync! */
2409 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2410 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2411 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2412 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2413
2414 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2415 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2416 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2417
2418 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2419 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2420 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2421
2422 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2423 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2424 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2425
2426 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2427 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2428 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2429
2430 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2431 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2432 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2433
2434 /* Sysenter MSR */
2435 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2436 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2437 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2438
2439 /* System MSRs. */
2440 pCtx->msrEFER = pVM->rem.s.Env.efer;
2441 pCtx->msrSTAR = pVM->rem.s.Env.star;
2442 pCtx->msrPAT = pVM->rem.s.Env.pat;
2443#ifdef TARGET_X86_64
2444 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2445 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2446 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2447 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2448#endif
2449
2450 remR3TrapClear(pVM);
2451
2452 /*
2453 * Check for traps.
2454 */
2455 if ( pVM->rem.s.Env.exception_index >= 0
2456 && pVM->rem.s.Env.exception_index < 256)
2457 {
2458 int rc;
2459
2460 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2461 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2462 AssertRC(rc);
2463 switch (pVM->rem.s.Env.exception_index)
2464 {
2465 case 0x0e:
2466 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2467 /* fallthru */
2468 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2469 case 0x11: case 0x08: /* 0 */
2470 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2471 break;
2472 }
2473
2474 }
2475
2476 /*
2477 * We're not longer in REM mode.
2478 */
2479 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2480 pVM->rem.s.fInREM = false;
2481 pVM->rem.s.pCtx = NULL;
2482 pVM->rem.s.Env.pVCpu = NULL;
2483 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2484 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2485 return VINF_SUCCESS;
2486}
2487
2488
2489/**
2490 * This is called by the disassembler when it wants to update the cpu state
2491 * before for instance doing a register dump.
2492 */
2493static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2494{
2495 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2496 unsigned i;
2497
2498 Assert(pVM->rem.s.fInREM);
2499
2500 /*
2501 * Copy back the registers.
2502 * This is done in the order they are declared in the CPUMCTX structure.
2503 */
2504
2505 /** @todo FOP */
2506 /** @todo FPUIP */
2507 /** @todo CS */
2508 /** @todo FPUDP */
2509 /** @todo DS */
2510 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2511 pCtx->fpu.MXCSR = 0;
2512 pCtx->fpu.MXCSR_MASK = 0;
2513
2514 /** @todo check if FPU/XMM was actually used in the recompiler */
2515 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2516//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2517
2518#ifdef TARGET_X86_64
2519 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2520 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2521 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2522 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2523 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2524 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2525 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2526 pCtx->r8 = pVM->rem.s.Env.regs[8];
2527 pCtx->r9 = pVM->rem.s.Env.regs[9];
2528 pCtx->r10 = pVM->rem.s.Env.regs[10];
2529 pCtx->r11 = pVM->rem.s.Env.regs[11];
2530 pCtx->r12 = pVM->rem.s.Env.regs[12];
2531 pCtx->r13 = pVM->rem.s.Env.regs[13];
2532 pCtx->r14 = pVM->rem.s.Env.regs[14];
2533 pCtx->r15 = pVM->rem.s.Env.regs[15];
2534
2535 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2536#else
2537 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2538 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2539 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2540 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2541 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2542 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2543 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2544
2545 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2546#endif
2547
2548 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2549
2550 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2551 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2552 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2553 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2554 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2555
2556#ifdef TARGET_X86_64
2557 pCtx->rip = pVM->rem.s.Env.eip;
2558 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2559#else
2560 pCtx->eip = pVM->rem.s.Env.eip;
2561 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2562#endif
2563
2564 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2565 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2566 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2567 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2568 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2569 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2570
2571 for (i = 0; i < 8; i++)
2572 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2573
2574 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2575 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2576 {
2577 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2578 STAM_COUNTER_INC(&gStatREMGDTChange);
2579 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2580 }
2581
2582 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2583 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2584 {
2585 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2586 STAM_COUNTER_INC(&gStatREMIDTChange);
2587 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2588 }
2589
2590 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2591 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2592 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2593 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2594 {
2595 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2596 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2597 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2598 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2599 STAM_COUNTER_INC(&gStatREMLDTRChange);
2600 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2601 }
2602
2603 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2604 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2605 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2606 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2607 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2608 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2609 : 0) )
2610 {
2611 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2612 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2613 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2614 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2615 pCtx->tr = pVM->rem.s.Env.tr.selector;
2616 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2617 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2618 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2619 if (pCtx->trHid.Attr.u)
2620 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2621 STAM_COUNTER_INC(&gStatREMTRChange);
2622 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2623 }
2624
2625 /** @todo These values could still be out of sync! */
2626 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2627 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2628 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2629 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2630
2631 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2632 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2633 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2634
2635 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2636 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2637 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2638
2639 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2640 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2641 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2642
2643 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2644 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2645 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2646
2647 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2648 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2649 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2650
2651 /* Sysenter MSR */
2652 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2653 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2654 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2655
2656 /* System MSRs. */
2657 pCtx->msrEFER = pVM->rem.s.Env.efer;
2658 pCtx->msrSTAR = pVM->rem.s.Env.star;
2659 pCtx->msrPAT = pVM->rem.s.Env.pat;
2660#ifdef TARGET_X86_64
2661 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2662 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2663 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2664 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2665#endif
2666
2667}
2668
2669
2670/**
2671 * Update the VMM state information if we're currently in REM.
2672 *
2673 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2674 * we're currently executing in REM and the VMM state is invalid. This method will of
2675 * course check that we're executing in REM before syncing any data over to the VMM.
2676 *
2677 * @param pVM The VM handle.
2678 * @param pVCpu The VMCPU handle.
2679 */
2680REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2681{
2682 if (pVM->rem.s.fInREM)
2683 remR3StateUpdate(pVM, pVCpu);
2684}
2685
2686
2687#undef LOG_GROUP
2688#define LOG_GROUP LOG_GROUP_REM
2689
2690
2691/**
2692 * Notify the recompiler about Address Gate 20 state change.
2693 *
2694 * This notification is required since A20 gate changes are
2695 * initialized from a device driver and the VM might just as
2696 * well be in REM mode as in RAW mode.
2697 *
2698 * @param pVM VM handle.
2699 * @param pVCpu VMCPU handle.
2700 * @param fEnable True if the gate should be enabled.
2701 * False if the gate should be disabled.
2702 */
2703REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2704{
2705 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2706 VM_ASSERT_EMT(pVM);
2707
2708 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2709 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2710 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2711}
2712
2713
2714/**
2715 * Replays the handler notification changes
2716 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2717 *
2718 * @param pVM VM handle.
2719 */
2720REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2721{
2722 /*
2723 * Replay the flushes.
2724 */
2725 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2726 VM_ASSERT_EMT(pVM);
2727
2728 /** @todo this isn't ensuring correct replay order. */
2729 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY_BIT))
2730 {
2731 uint32_t idxNext;
2732 uint32_t idxRevHead;
2733 uint32_t idxHead;
2734#ifdef VBOX_STRICT
2735 int32_t c = 0;
2736#endif
2737
2738 /* Lockless purging of pending notifications. */
2739 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2740 if (idxHead == UINT32_MAX)
2741 return;
2742 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2743
2744 /*
2745 * Reverse the list to process it in FIFO order.
2746 */
2747 idxRevHead = UINT32_MAX;
2748 do
2749 {
2750 /* Save the index of the next rec. */
2751 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
2752 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
2753 /* Push the record onto the reversed list. */
2754 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
2755 idxRevHead = idxHead;
2756 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2757 /* Advance. */
2758 idxHead = idxNext;
2759 } while (idxHead != UINT32_MAX);
2760
2761 /*
2762 * Loop thru the list, reinserting the record into the free list as they are
2763 * processed to avoid having other EMTs running out of entries while we're flushing.
2764 */
2765 idxHead = idxRevHead;
2766 do
2767 {
2768 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
2769 uint32_t idxCur;
2770 Assert(--c >= 0);
2771
2772 switch (pCur->enmKind)
2773 {
2774 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2775 remR3NotifyHandlerPhysicalRegister(pVM,
2776 pCur->u.PhysicalRegister.enmType,
2777 pCur->u.PhysicalRegister.GCPhys,
2778 pCur->u.PhysicalRegister.cb,
2779 pCur->u.PhysicalRegister.fHasHCHandler);
2780 break;
2781
2782 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2783 remR3NotifyHandlerPhysicalDeregister(pVM,
2784 pCur->u.PhysicalDeregister.enmType,
2785 pCur->u.PhysicalDeregister.GCPhys,
2786 pCur->u.PhysicalDeregister.cb,
2787 pCur->u.PhysicalDeregister.fHasHCHandler,
2788 pCur->u.PhysicalDeregister.fRestoreAsRAM);
2789 break;
2790
2791 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2792 remR3NotifyHandlerPhysicalModify(pVM,
2793 pCur->u.PhysicalModify.enmType,
2794 pCur->u.PhysicalModify.GCPhysOld,
2795 pCur->u.PhysicalModify.GCPhysNew,
2796 pCur->u.PhysicalModify.cb,
2797 pCur->u.PhysicalModify.fHasHCHandler,
2798 pCur->u.PhysicalModify.fRestoreAsRAM);
2799 break;
2800
2801 default:
2802 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
2803 break;
2804 }
2805
2806 /*
2807 * Advance idxHead.
2808 */
2809 idxCur = idxHead;
2810 idxHead = pCur->idxNext;
2811 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
2812
2813 /*
2814 * Put the record back into the free list.
2815 */
2816 do
2817 {
2818 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
2819 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
2820 ASMCompilerBarrier();
2821 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
2822 } while (idxHead != UINT32_MAX);
2823
2824#ifdef VBOX_STRICT
2825 if (pVM->cCPUs == 1)
2826 {
2827 /* Check that all records are now on the free list. */
2828 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
2829 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
2830 c++;
2831 AssertMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
2832 }
2833#endif
2834 }
2835}
2836
2837
2838/**
2839 * Notify REM about changed code page.
2840 *
2841 * @returns VBox status code.
2842 * @param pVM VM handle.
2843 * @param pVCpu VMCPU handle.
2844 * @param pvCodePage Code page address
2845 */
2846REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
2847{
2848#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2849 int rc;
2850 RTGCPHYS PhysGC;
2851 uint64_t flags;
2852
2853 VM_ASSERT_EMT(pVM);
2854
2855 /*
2856 * Get the physical page address.
2857 */
2858 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2859 if (rc == VINF_SUCCESS)
2860 {
2861 /*
2862 * Sync the required registers and flush the whole page.
2863 * (Easier to do the whole page than notifying it about each physical
2864 * byte that was changed.
2865 */
2866 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2867 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2868 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2869 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2870
2871 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2872 }
2873#endif
2874 return VINF_SUCCESS;
2875}
2876
2877
2878/**
2879 * Notification about a successful MMR3PhysRegister() call.
2880 *
2881 * @param pVM VM handle.
2882 * @param GCPhys The physical address the RAM.
2883 * @param cb Size of the memory.
2884 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2885 */
2886REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2887{
2888 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2889 VM_ASSERT_EMT(pVM);
2890
2891 /*
2892 * Validate input - we trust the caller.
2893 */
2894 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2895 Assert(cb);
2896 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2897 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2898
2899 /*
2900 * Base ram? Update GCPhysLastRam.
2901 */
2902 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2903 {
2904 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2905 {
2906 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2907 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2908 }
2909 }
2910
2911 /*
2912 * Register the ram.
2913 */
2914 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2915
2916 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2917 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2918 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2919
2920 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2921}
2922
2923
2924/**
2925 * Notification about a successful MMR3PhysRomRegister() call.
2926 *
2927 * @param pVM VM handle.
2928 * @param GCPhys The physical address of the ROM.
2929 * @param cb The size of the ROM.
2930 * @param pvCopy Pointer to the ROM copy.
2931 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2932 * This function will be called when ever the protection of the
2933 * shadow ROM changes (at reset and end of POST).
2934 */
2935REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2936{
2937 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2938 VM_ASSERT_EMT(pVM);
2939
2940 /*
2941 * Validate input - we trust the caller.
2942 */
2943 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2944 Assert(cb);
2945 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2946
2947 /*
2948 * Register the rom.
2949 */
2950 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2951
2952 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2953 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2954 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2955
2956 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2957}
2958
2959
2960/**
2961 * Notification about a successful memory deregistration or reservation.
2962 *
2963 * @param pVM VM Handle.
2964 * @param GCPhys Start physical address.
2965 * @param cb The size of the range.
2966 */
2967REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2968{
2969 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2970 VM_ASSERT_EMT(pVM);
2971
2972 /*
2973 * Validate input - we trust the caller.
2974 */
2975 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2976 Assert(cb);
2977 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2978
2979 /*
2980 * Unassigning the memory.
2981 */
2982 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2983
2984 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2985 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2986 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2987
2988 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2989}
2990
2991
2992/**
2993 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2994 *
2995 * @param pVM VM Handle.
2996 * @param enmType Handler type.
2997 * @param GCPhys Handler range address.
2998 * @param cb Size of the handler range.
2999 * @param fHasHCHandler Set if the handler has a HC callback function.
3000 *
3001 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3002 * Handler memory type to memory which has no HC handler.
3003 */
3004static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3005{
3006 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3007 enmType, GCPhys, cb, fHasHCHandler));
3008
3009 VM_ASSERT_EMT(pVM);
3010 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3011 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3012
3013
3014 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3015
3016 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3017 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3018 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3019 else if (fHasHCHandler)
3020 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3021 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3022
3023 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3024}
3025
3026/**
3027 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3028 *
3029 * @param pVM VM Handle.
3030 * @param enmType Handler type.
3031 * @param GCPhys Handler range address.
3032 * @param cb Size of the handler range.
3033 * @param fHasHCHandler Set if the handler has a HC callback function.
3034 *
3035 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3036 * Handler memory type to memory which has no HC handler.
3037 */
3038REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3039{
3040 REMR3ReplayHandlerNotifications(pVM);
3041
3042 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3043}
3044
3045/**
3046 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3047 *
3048 * @param pVM VM Handle.
3049 * @param enmType Handler type.
3050 * @param GCPhys Handler range address.
3051 * @param cb Size of the handler range.
3052 * @param fHasHCHandler Set if the handler has a HC callback function.
3053 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3054 */
3055static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3056{
3057 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3058 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3059 VM_ASSERT_EMT(pVM);
3060
3061
3062 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3063
3064 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3065 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3066 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3067 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3068 else if (fHasHCHandler)
3069 {
3070 if (!fRestoreAsRAM)
3071 {
3072 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3073 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3074 }
3075 else
3076 {
3077 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3078 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3079 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3080 }
3081 }
3082 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3083
3084 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3085}
3086
3087/**
3088 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3089 *
3090 * @param pVM VM Handle.
3091 * @param enmType Handler type.
3092 * @param GCPhys Handler range address.
3093 * @param cb Size of the handler range.
3094 * @param fHasHCHandler Set if the handler has a HC callback function.
3095 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3096 */
3097REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3098{
3099 REMR3ReplayHandlerNotifications(pVM);
3100 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3101}
3102
3103
3104/**
3105 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3106 *
3107 * @param pVM VM Handle.
3108 * @param enmType Handler type.
3109 * @param GCPhysOld Old handler range address.
3110 * @param GCPhysNew New handler range address.
3111 * @param cb Size of the handler range.
3112 * @param fHasHCHandler Set if the handler has a HC callback function.
3113 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3114 */
3115static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3116{
3117 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3118 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3119 VM_ASSERT_EMT(pVM);
3120 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3121
3122 if (fHasHCHandler)
3123 {
3124 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3125
3126 /*
3127 * Reset the old page.
3128 */
3129 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3130 if (!fRestoreAsRAM)
3131 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3132 else
3133 {
3134 /* This is not perfect, but it'll do for PD monitoring... */
3135 Assert(cb == PAGE_SIZE);
3136 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3137 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3138 }
3139
3140 /*
3141 * Update the new page.
3142 */
3143 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3144 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3145 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3146 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3147
3148 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3149 }
3150}
3151
3152/**
3153 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3154 *
3155 * @param pVM VM Handle.
3156 * @param enmType Handler type.
3157 * @param GCPhysOld Old handler range address.
3158 * @param GCPhysNew New handler range address.
3159 * @param cb Size of the handler range.
3160 * @param fHasHCHandler Set if the handler has a HC callback function.
3161 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3162 */
3163REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3164{
3165 REMR3ReplayHandlerNotifications(pVM);
3166
3167 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3168}
3169
3170/**
3171 * Checks if we're handling access to this page or not.
3172 *
3173 * @returns true if we're trapping access.
3174 * @returns false if we aren't.
3175 * @param pVM The VM handle.
3176 * @param GCPhys The physical address.
3177 *
3178 * @remark This function will only work correctly in VBOX_STRICT builds!
3179 */
3180REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3181{
3182#ifdef VBOX_STRICT
3183 unsigned long off;
3184 REMR3ReplayHandlerNotifications(pVM);
3185
3186 off = get_phys_page_offset(GCPhys);
3187 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3188 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3189 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3190#else
3191 return false;
3192#endif
3193}
3194
3195
3196/**
3197 * Deals with a rare case in get_phys_addr_code where the code
3198 * is being monitored.
3199 *
3200 * It could also be an MMIO page, in which case we will raise a fatal error.
3201 *
3202 * @returns The physical address corresponding to addr.
3203 * @param env The cpu environment.
3204 * @param addr The virtual address.
3205 * @param pTLBEntry The TLB entry.
3206 */
3207target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3208 target_ulong addr,
3209 CPUTLBEntry* pTLBEntry,
3210 target_phys_addr_t ioTLBEntry)
3211{
3212 PVM pVM = env->pVM;
3213
3214 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3215 {
3216 /* If code memory is being monitored, appropriate IOTLB entry will have
3217 handler IO type, and addend will provide real physical address, no
3218 matter if we store VA in TLB or not, as handlers are always passed PA */
3219 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3220 return ret;
3221 }
3222 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3223 "*** handlers\n",
3224 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3225 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3226 LogRel(("*** mmio\n"));
3227 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3228 LogRel(("*** phys\n"));
3229 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3230 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3231 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3232 AssertFatalFailed();
3233}
3234
3235/**
3236 * Read guest RAM and ROM.
3237 *
3238 * @param SrcGCPhys The source address (guest physical).
3239 * @param pvDst The destination address.
3240 * @param cb Number of bytes
3241 */
3242void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3243{
3244 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3245 VBOX_CHECK_ADDR(SrcGCPhys);
3246 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3247#ifdef VBOX_DEBUG_PHYS
3248 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3249#endif
3250 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3251}
3252
3253
3254/**
3255 * Read guest RAM and ROM, unsigned 8-bit.
3256 *
3257 * @param SrcGCPhys The source address (guest physical).
3258 */
3259RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3260{
3261 uint8_t val;
3262 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3263 VBOX_CHECK_ADDR(SrcGCPhys);
3264 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3265 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3266#ifdef VBOX_DEBUG_PHYS
3267 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3268#endif
3269 return val;
3270}
3271
3272
3273/**
3274 * Read guest RAM and ROM, signed 8-bit.
3275 *
3276 * @param SrcGCPhys The source address (guest physical).
3277 */
3278RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3279{
3280 int8_t val;
3281 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3282 VBOX_CHECK_ADDR(SrcGCPhys);
3283 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3284 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3285#ifdef VBOX_DEBUG_PHYS
3286 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3287#endif
3288 return val;
3289}
3290
3291
3292/**
3293 * Read guest RAM and ROM, unsigned 16-bit.
3294 *
3295 * @param SrcGCPhys The source address (guest physical).
3296 */
3297RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3298{
3299 uint16_t val;
3300 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3301 VBOX_CHECK_ADDR(SrcGCPhys);
3302 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3303 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3304#ifdef VBOX_DEBUG_PHYS
3305 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3306#endif
3307 return val;
3308}
3309
3310
3311/**
3312 * Read guest RAM and ROM, signed 16-bit.
3313 *
3314 * @param SrcGCPhys The source address (guest physical).
3315 */
3316RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3317{
3318 int16_t val;
3319 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3320 VBOX_CHECK_ADDR(SrcGCPhys);
3321 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3322 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3323#ifdef VBOX_DEBUG_PHYS
3324 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3325#endif
3326 return val;
3327}
3328
3329
3330/**
3331 * Read guest RAM and ROM, unsigned 32-bit.
3332 *
3333 * @param SrcGCPhys The source address (guest physical).
3334 */
3335RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3336{
3337 uint32_t val;
3338 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3339 VBOX_CHECK_ADDR(SrcGCPhys);
3340 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3341 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3342#ifdef VBOX_DEBUG_PHYS
3343 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3344#endif
3345 return val;
3346}
3347
3348
3349/**
3350 * Read guest RAM and ROM, signed 32-bit.
3351 *
3352 * @param SrcGCPhys The source address (guest physical).
3353 */
3354RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3355{
3356 int32_t val;
3357 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3358 VBOX_CHECK_ADDR(SrcGCPhys);
3359 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3360 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3361#ifdef VBOX_DEBUG_PHYS
3362 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3363#endif
3364 return val;
3365}
3366
3367
3368/**
3369 * Read guest RAM and ROM, unsigned 64-bit.
3370 *
3371 * @param SrcGCPhys The source address (guest physical).
3372 */
3373uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3374{
3375 uint64_t val;
3376 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3377 VBOX_CHECK_ADDR(SrcGCPhys);
3378 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3379 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3380#ifdef VBOX_DEBUG_PHYS
3381 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3382#endif
3383 return val;
3384}
3385
3386
3387/**
3388 * Read guest RAM and ROM, signed 64-bit.
3389 *
3390 * @param SrcGCPhys The source address (guest physical).
3391 */
3392int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3393{
3394 int64_t val;
3395 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3396 VBOX_CHECK_ADDR(SrcGCPhys);
3397 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3398 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3399#ifdef VBOX_DEBUG_PHYS
3400 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3401#endif
3402 return val;
3403}
3404
3405
3406/**
3407 * Write guest RAM.
3408 *
3409 * @param DstGCPhys The destination address (guest physical).
3410 * @param pvSrc The source address.
3411 * @param cb Number of bytes to write
3412 */
3413void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3414{
3415 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3416 VBOX_CHECK_ADDR(DstGCPhys);
3417 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3418 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3419#ifdef VBOX_DEBUG_PHYS
3420 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3421#endif
3422}
3423
3424
3425/**
3426 * Write guest RAM, unsigned 8-bit.
3427 *
3428 * @param DstGCPhys The destination address (guest physical).
3429 * @param val Value
3430 */
3431void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3432{
3433 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3434 VBOX_CHECK_ADDR(DstGCPhys);
3435 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3436 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3437#ifdef VBOX_DEBUG_PHYS
3438 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3439#endif
3440}
3441
3442
3443/**
3444 * Write guest RAM, unsigned 8-bit.
3445 *
3446 * @param DstGCPhys The destination address (guest physical).
3447 * @param val Value
3448 */
3449void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3450{
3451 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3452 VBOX_CHECK_ADDR(DstGCPhys);
3453 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3454 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3455#ifdef VBOX_DEBUG_PHYS
3456 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3457#endif
3458}
3459
3460
3461/**
3462 * Write guest RAM, unsigned 32-bit.
3463 *
3464 * @param DstGCPhys The destination address (guest physical).
3465 * @param val Value
3466 */
3467void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3468{
3469 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3470 VBOX_CHECK_ADDR(DstGCPhys);
3471 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3472 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3473#ifdef VBOX_DEBUG_PHYS
3474 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3475#endif
3476}
3477
3478
3479/**
3480 * Write guest RAM, unsigned 64-bit.
3481 *
3482 * @param DstGCPhys The destination address (guest physical).
3483 * @param val Value
3484 */
3485void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3486{
3487 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3488 VBOX_CHECK_ADDR(DstGCPhys);
3489 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3490 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3491#ifdef VBOX_DEBUG_PHYS
3492 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3493#endif
3494}
3495
3496#undef LOG_GROUP
3497#define LOG_GROUP LOG_GROUP_REM_MMIO
3498
3499/** Read MMIO memory. */
3500static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3501{
3502 uint32_t u32 = 0;
3503 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3504 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3505 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3506 return u32;
3507}
3508
3509/** Read MMIO memory. */
3510static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3511{
3512 uint32_t u32 = 0;
3513 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3514 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3515 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3516 return u32;
3517}
3518
3519/** Read MMIO memory. */
3520static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3521{
3522 uint32_t u32 = 0;
3523 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3524 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3525 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3526 return u32;
3527}
3528
3529/** Write to MMIO memory. */
3530static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3531{
3532 int rc;
3533 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3534 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3535 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3536}
3537
3538/** Write to MMIO memory. */
3539static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3540{
3541 int rc;
3542 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3543 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3544 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3545}
3546
3547/** Write to MMIO memory. */
3548static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3549{
3550 int rc;
3551 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3552 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3553 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3554}
3555
3556
3557#undef LOG_GROUP
3558#define LOG_GROUP LOG_GROUP_REM_HANDLER
3559
3560/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3561
3562static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3563{
3564 uint8_t u8;
3565 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3566 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3567 return u8;
3568}
3569
3570static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3571{
3572 uint16_t u16;
3573 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3574 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3575 return u16;
3576}
3577
3578static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3579{
3580 uint32_t u32;
3581 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3582 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3583 return u32;
3584}
3585
/** Write handler-covered memory, 8-bit; goes through PGM.
 * NOTE(review): passes &u32 with sizeof(uint8_t), i.e. the first byte of the
 * uint32_t in host memory order - correct only on a little-endian host.
 * Appears intentional per the LFB-access hack warning above; confirm if
 * big-endian hosts are ever targeted. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3591
/** Write handler-covered memory, 16-bit; goes through PGM.
 * NOTE(review): writes the low half of u32 via &u32 + sizeof(uint16_t) -
 * little-endian host assumption, same as remR3HandlerWriteU8. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3597
/** Write handler-covered memory, 32-bit; goes through PGM. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3603
3604/* -+- disassembly -+- */
3605
3606#undef LOG_GROUP
3607#define LOG_GROUP LOG_GROUP_REM_DISAS
3608
3609
3610/**
3611 * Enables or disables singled stepped disassembly.
3612 *
3613 * @returns VBox status code.
3614 * @param pVM VM handle.
3615 * @param fEnable To enable set this flag, to disable clear it.
3616 */
3617static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3618{
3619 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3620 VM_ASSERT_EMT(pVM);
3621
3622 if (fEnable)
3623 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3624 else
3625 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3626 return VINF_SUCCESS;
3627}
3628
3629
3630/**
3631 * Enables or disables singled stepped disassembly.
3632 *
3633 * @returns VBox status code.
3634 * @param pVM VM handle.
3635 * @param fEnable To enable set this flag, to disable clear it.
3636 */
3637REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3638{
3639 PVMREQ pReq;
3640 int rc;
3641
3642 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3643 if (VM_IS_EMT(pVM))
3644 return remR3DisasEnableStepping(pVM, fEnable);
3645
3646 rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3647 AssertRC(rc);
3648 if (RT_SUCCESS(rc))
3649 rc = pReq->iStatus;
3650 VMR3ReqFree(pReq);
3651 return rc;
3652}
3653
3654
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument the current state is printed; with one boolean argument
 * single-stepped disassembly is switched on or off.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3679
3680
3681/**
3682 * Disassembles one instruction and prints it to the log.
3683 *
3684 * @returns Success indicator.
3685 * @param env Pointer to the recompiler CPU structure.
3686 * @param f32BitCode Indicates that whether or not the code should
3687 * be disassembled as 16 or 32 bit. If -1 the CS
3688 * selector will be inspected.
3689 * @param pszPrefix
3690 */
3691bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3692{
3693 PVM pVM = env->pVM;
3694 const bool fLog = LogIsEnabled();
3695 const bool fLog2 = LogIs2Enabled();
3696 int rc = VINF_SUCCESS;
3697
3698 /*
3699 * Don't bother if there ain't any log output to do.
3700 */
3701 if (!fLog && !fLog2)
3702 return true;
3703
3704 /*
3705 * Update the state so DBGF reads the correct register values.
3706 */
3707 remR3StateUpdate(pVM, env->pVCpu);
3708
3709 /*
3710 * Log registers if requested.
3711 */
3712 if (!fLog2)
3713 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3714
3715 /*
3716 * Disassemble to log.
3717 */
3718 if (fLog)
3719 rc = DBGFR3DisasInstrCurrentLogInternal(env->pVCpu, pszPrefix);
3720
3721 return RT_SUCCESS(rc);
3722}
3723
3724
3725/**
3726 * Disassemble recompiled code.
3727 *
3728 * @param phFileIgnored Ignored, logfile usually.
3729 * @param pvCode Pointer to the code block.
3730 * @param cb Size of the code block.
3731 */
3732void disas(FILE *phFile, void *pvCode, unsigned long cb)
3733{
3734#ifdef DEBUG_TMP_LOGGING
3735# define DISAS_PRINTF(x...) fprintf(phFile, x)
3736#else
3737# define DISAS_PRINTF(x...) RTLogPrintf(x)
3738 if (LogIs2Enabled())
3739#endif
3740 {
3741 unsigned off = 0;
3742 char szOutput[256];
3743 DISCPUSTATE Cpu;
3744
3745 memset(&Cpu, 0, sizeof(Cpu));
3746#ifdef RT_ARCH_X86
3747 Cpu.mode = CPUMODE_32BIT;
3748#else
3749 Cpu.mode = CPUMODE_64BIT;
3750#endif
3751
3752 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3753 while (off < cb)
3754 {
3755 uint32_t cbInstr;
3756 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3757 DISAS_PRINTF("%s", szOutput);
3758 else
3759 {
3760 DISAS_PRINTF("disas error\n");
3761 cbInstr = 1;
3762#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3763 break;
3764#endif
3765 }
3766 off += cbInstr;
3767 }
3768 }
3769
3770#undef DISAS_PRINTF
3771}
3772
3773
/**
 * Disassemble guest code.
 *
 * Walks the guest code at @a uCode instruction by instruction via DBGF and
 * prints each line, either to the given file (DEBUG_TMP_LOGGING builds) or
 * to the regular logger when level-2 logging is enabled.
 *
 * @param   phFile  Ignored unless DEBUG_TMP_LOGGING is defined; logfile usually.
 * @param   uCode   The guest address of the code to disassemble. (flat?)
 * @param   cb      Number of bytes to disassemble.
 * @param   fFlags  Flags, probably something which tells if this is 16, 32 or 64 bit code.
 */
void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    /* Guards the whole block below in the non-DEBUG_TMP_LOGGING build. */
    if (LogIs2Enabled())
#endif
    {
        PVM pVM = cpu_single_env->pVM;
        PVMCPU pVCpu = cpu_single_env->pVCpu;
        RTSEL cs;               /* guest CS selector */
        RTGCUINTPTR eip;        /* uCode translated back to a CS-relative IP */

        Assert(pVCpu);

        /*
         * Update the state so DBGF reads the correct register values (flags).
         */
        remR3StateUpdate(pVM, pVCpu);

        /*
         * Do the disassembling.
         */
        DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
        cs = cpu_single_env->segs[R_CS].selector;
        eip = uCode - cpu_single_env->segs[R_CS].base;
        for (;;)
        {
            char szBuf[256];
            uint32_t cbInstr;
            int rc = DBGFR3DisasInstrEx(pVM,
                                        pVCpu->idCpu,
                                        cs,
                                        eip,
                                        0,
                                        szBuf, sizeof(szBuf),
                                        &cbInstr);
            if (RT_SUCCESS(rc))
                DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
            else
            {
                /* NOTE(review): szBuf may be stale here if DBGFR3DisasInstrEx
                   failed before writing anything -- confirm it always fills
                   in an error string. Advance one byte and keep going. */
                DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
                cbInstr = 1;
            }

            /* next */
            if (cb <= cbInstr)
                break;
            cb -= cbInstr;
            uCode += cbInstr;
            eip += cbInstr;
        }
    }
#undef DISAS_PRINTF
}
3838
3839
3840/**
3841 * Looks up a guest symbol.
3842 *
3843 * @returns Pointer to symbol name. This is a static buffer.
3844 * @param orig_addr The address in question.
3845 */
3846const char *lookup_symbol(target_ulong orig_addr)
3847{
3848 RTGCINTPTR off = 0;
3849 DBGFSYMBOL Sym;
3850 PVM pVM = cpu_single_env->pVM;
3851 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3852 if (RT_SUCCESS(rc))
3853 {
3854 static char szSym[sizeof(Sym.szName) + 48];
3855 if (!off)
3856 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3857 else if (off > 0)
3858 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3859 else
3860 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3861 return szSym;
3862 }
3863 return "<N/A>";
3864}
3865
3866
3867#undef LOG_GROUP
3868#define LOG_GROUP LOG_GROUP_REM
3869
3870
3871/* -+- FF notifications -+- */
3872
3873
3874/**
3875 * Notification about a pending interrupt.
3876 *
3877 * @param pVM VM Handle.
3878 * @param pVCpu VMCPU Handle.
3879 * @param u8Interrupt Interrupt
3880 * @thread The emulation thread.
3881 */
3882REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
3883{
3884 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3885 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3886}
3887
3888/**
3889 * Notification about a pending interrupt.
3890 *
3891 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3892 * @param pVM VM Handle.
3893 * @param pVCpu VMCPU Handle.
3894 * @thread The emulation thread.
3895 */
3896REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
3897{
3898 return pVM->rem.s.u32PendingInterrupt;
3899}
3900
3901/**
3902 * Notification about the interrupt FF being set.
3903 *
3904 * @param pVM VM Handle.
3905 * @param pVCpu VMCPU Handle.
3906 * @thread The emulation thread.
3907 */
3908REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3909{
3910 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3911 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3912 if (pVM->rem.s.fInREM)
3913 {
3914 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3915 CPU_INTERRUPT_EXTERNAL_HARD);
3916 }
3917}
3918
3919
3920/**
3921 * Notification about the interrupt FF being set.
3922 *
3923 * @param pVM VM Handle.
3924 * @param pVCpu VMCPU Handle.
3925 * @thread Any.
3926 */
3927REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3928{
3929 LogFlow(("REMR3NotifyInterruptClear:\n"));
3930 if (pVM->rem.s.fInREM)
3931 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3932}
3933
3934
/**
 * Notification about pending timer(s).
 *
 * @param   pVM             VM Handle.
 * @param   pVCpuDst        The target cpu for this notification.
 *                          TM will not broadcast pending timer events, but use
 *                          a dedicated EMT for them. So, only interrupt REM
 *                          execution if the given CPU is executing in REM.
 * @thread  Any.
 */
REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
{
#ifndef DEBUG_bird
    LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
#endif
    if (pVM->rem.s.fInREM)
    {
        /* Only poke the recompiler when the notified VCPU is the one it is
           currently executing; timer events for other VCPUs are ignored here. */
        if (pVM->rem.s.Env.pVCpu == pVCpuDst)
        {
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
            ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
                           CPU_INTERRUPT_EXTERNAL_TIMER);
        }
        else
            LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
    }
    else
        LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
}
3964
3965
3966/**
3967 * Notification about pending DMA transfers.
3968 *
3969 * @param pVM VM Handle.
3970 * @thread Any.
3971 */
3972REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3973{
3974 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3975 if (pVM->rem.s.fInREM)
3976 {
3977 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3978 CPU_INTERRUPT_EXTERNAL_DMA);
3979 }
3980}
3981
3982
3983/**
3984 * Notification about pending timer(s).
3985 *
3986 * @param pVM VM Handle.
3987 * @thread Any.
3988 */
3989REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3990{
3991 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3992 if (pVM->rem.s.fInREM)
3993 {
3994 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3995 CPU_INTERRUPT_EXTERNAL_EXIT);
3996 }
3997}
3998
3999
4000/**
4001 * Notification about pending FF set by an external thread.
4002 *
4003 * @param pVM VM handle.
4004 * @thread Any.
4005 */
4006REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4007{
4008 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4009 if (pVM->rem.s.fInREM)
4010 {
4011 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4012 CPU_INTERRUPT_EXTERNAL_EXIT);
4013 }
4014}
4015
4016
4017#ifdef VBOX_WITH_STATISTICS
4018void remR3ProfileStart(int statcode)
4019{
4020 STAMPROFILEADV *pStat;
4021 switch(statcode)
4022 {
4023 case STATS_EMULATE_SINGLE_INSTR:
4024 pStat = &gStatExecuteSingleInstr;
4025 break;
4026 case STATS_QEMU_COMPILATION:
4027 pStat = &gStatCompilationQEmu;
4028 break;
4029 case STATS_QEMU_RUN_EMULATED_CODE:
4030 pStat = &gStatRunCodeQEmu;
4031 break;
4032 case STATS_QEMU_TOTAL:
4033 pStat = &gStatTotalTimeQEmu;
4034 break;
4035 case STATS_QEMU_RUN_TIMERS:
4036 pStat = &gStatTimers;
4037 break;
4038 case STATS_TLB_LOOKUP:
4039 pStat= &gStatTBLookup;
4040 break;
4041 case STATS_IRQ_HANDLING:
4042 pStat= &gStatIRQ;
4043 break;
4044 case STATS_RAW_CHECK:
4045 pStat = &gStatRawCheck;
4046 break;
4047
4048 default:
4049 AssertMsgFailed(("unknown stat %d\n", statcode));
4050 return;
4051 }
4052 STAM_PROFILE_ADV_START(pStat, a);
4053}
4054
4055
4056void remR3ProfileStop(int statcode)
4057{
4058 STAMPROFILEADV *pStat;
4059 switch(statcode)
4060 {
4061 case STATS_EMULATE_SINGLE_INSTR:
4062 pStat = &gStatExecuteSingleInstr;
4063 break;
4064 case STATS_QEMU_COMPILATION:
4065 pStat = &gStatCompilationQEmu;
4066 break;
4067 case STATS_QEMU_RUN_EMULATED_CODE:
4068 pStat = &gStatRunCodeQEmu;
4069 break;
4070 case STATS_QEMU_TOTAL:
4071 pStat = &gStatTotalTimeQEmu;
4072 break;
4073 case STATS_QEMU_RUN_TIMERS:
4074 pStat = &gStatTimers;
4075 break;
4076 case STATS_TLB_LOOKUP:
4077 pStat= &gStatTBLookup;
4078 break;
4079 case STATS_IRQ_HANDLING:
4080 pStat= &gStatIRQ;
4081 break;
4082 case STATS_RAW_CHECK:
4083 pStat = &gStatRawCheck;
4084 break;
4085 default:
4086 AssertMsgFailed(("unknown stat %d\n", statcode));
4087 return;
4088 }
4089 STAM_PROFILE_ADV_STOP(pStat, a);
4090}
4091#endif
4092
4093/**
4094 * Raise an RC, force rem exit.
4095 *
4096 * @param pVM VM handle.
4097 * @param rc The rc.
4098 */
4099void remR3RaiseRC(PVM pVM, int rc)
4100{
4101 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4102 Assert(pVM->rem.s.fInREM);
4103 VM_ASSERT_EMT(pVM);
4104 pVM->rem.s.rc = rc;
4105 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4106}
4107
4108
4109/* -+- timers -+- */
4110
4111uint64_t cpu_get_tsc(CPUX86State *env)
4112{
4113 STAM_COUNTER_INC(&gStatCpuGetTSC);
4114 return TMCpuTickGet(env->pVCpu);
4115}
4116
4117
4118/* -+- interrupts -+- */
4119
4120void cpu_set_ferr(CPUX86State *env)
4121{
4122 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4123 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4124}
4125
/**
 * Fetches the next interrupt vector for the recompiler cpu.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param   env     Pointer to the recompiler CPU structure.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector recorded by REMR3NotifyPendingInterrupt and clear it. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* If further APIC/PIC interrupts are pending, keep CPU_INTERRUPT_HARD set
           so the recompiler comes back here for the next one. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4158
4159
4160/* -+- local apic -+- */
4161
4162void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4163{
4164 int rc = PDMApicSetBase(env->pVM, val);
4165 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4166}
4167
4168uint64_t cpu_get_apic_base(CPUX86State *env)
4169{
4170 uint64_t u64;
4171 int rc = PDMApicGetBase(env->pVM, &u64);
4172 if (RT_SUCCESS(rc))
4173 {
4174 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4175 return u64;
4176 }
4177 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4178 return 0;
4179}
4180
4181void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4182{
4183 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4184 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4185}
4186
4187uint8_t cpu_get_apic_tpr(CPUX86State *env)
4188{
4189 uint8_t u8;
4190 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4191 if (RT_SUCCESS(rc))
4192 {
4193 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4194 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4195 }
4196 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4197 return 0;
4198}
4199
4200
4201uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4202{
4203 uint64_t value;
4204 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4205 if (RT_SUCCESS(rc))
4206 {
4207 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4208 return value;
4209 }
4210 /** @todo: exception ? */
4211 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4212 return value;
4213}
4214
4215void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
4216{
4217 int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
4218 /** @todo: exception if error ? */
4219 LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
4220}
4221
4222uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
4223{
4224 Assert(env->pVCpu);
4225 return CPUMGetGuestMsr(env->pVCpu, msr);
4226}
4227
4228void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
4229{
4230 Assert(env->pVCpu);
4231 CPUMSetGuestMsr(env->pVCpu, msr, val);
4232}
4233
4234/* -+- I/O Ports -+- */
4235
4236#undef LOG_GROUP
4237#define LOG_GROUP LOG_GROUP_REM_IOPORT
4238
4239void cpu_outb(CPUState *env, int addr, int val)
4240{
4241 int rc;
4242
4243 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4244 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4245
4246 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4247 if (RT_LIKELY(rc == VINF_SUCCESS))
4248 return;
4249 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4250 {
4251 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4252 remR3RaiseRC(env->pVM, rc);
4253 return;
4254 }
4255 remAbort(rc, __FUNCTION__);
4256}
4257
4258void cpu_outw(CPUState *env, int addr, int val)
4259{
4260 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4261 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4262 if (RT_LIKELY(rc == VINF_SUCCESS))
4263 return;
4264 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4265 {
4266 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4267 remR3RaiseRC(env->pVM, rc);
4268 return;
4269 }
4270 remAbort(rc, __FUNCTION__);
4271}
4272
4273void cpu_outl(CPUState *env, int addr, int val)
4274{
4275 int rc;
4276 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4277 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4278 if (RT_LIKELY(rc == VINF_SUCCESS))
4279 return;
4280 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4281 {
4282 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4283 remR3RaiseRC(env->pVM, rc);
4284 return;
4285 }
4286 remAbort(rc, __FUNCTION__);
4287}
4288
4289int cpu_inb(CPUState *env, int addr)
4290{
4291 uint32_t u32 = 0;
4292 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4293 if (RT_LIKELY(rc == VINF_SUCCESS))
4294 {
4295 if (/*addr != 0x61 && */addr != 0x71)
4296 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4297 return (int)u32;
4298 }
4299 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4300 {
4301 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4302 remR3RaiseRC(env->pVM, rc);
4303 return (int)u32;
4304 }
4305 remAbort(rc, __FUNCTION__);
4306 return 0xff;
4307}
4308
4309int cpu_inw(CPUState *env, int addr)
4310{
4311 uint32_t u32 = 0;
4312 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4313 if (RT_LIKELY(rc == VINF_SUCCESS))
4314 {
4315 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4316 return (int)u32;
4317 }
4318 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4319 {
4320 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4321 remR3RaiseRC(env->pVM, rc);
4322 return (int)u32;
4323 }
4324 remAbort(rc, __FUNCTION__);
4325 return 0xffff;
4326}
4327
4328int cpu_inl(CPUState *env, int addr)
4329{
4330 uint32_t u32 = 0;
4331 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4332 if (RT_LIKELY(rc == VINF_SUCCESS))
4333 {
4334//if (addr==0x01f0 && u32 == 0x6b6d)
4335// loglevel = ~0;
4336 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4337 return (int)u32;
4338 }
4339 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4340 {
4341 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4342 remR3RaiseRC(env->pVM, rc);
4343 return (int)u32;
4344 }
4345 remAbort(rc, __FUNCTION__);
4346 return 0xffffffff;
4347}
4348
4349#undef LOG_GROUP
4350#define LOG_GROUP LOG_GROUP_REM
4351
4352
4353/* -+- helpers and misc other interfaces -+- */
4354
4355/**
4356 * Perform the CPUID instruction.
4357 *
4358 * ASMCpuId cannot be invoked from some source files where this is used because of global
4359 * register allocations.
4360 *
4361 * @param env Pointer to the recompiler CPU structure.
4362 * @param uOperator CPUID operation (eax).
4363 * @param pvEAX Where to store eax.
4364 * @param pvEBX Where to store ebx.
4365 * @param pvECX Where to store ecx.
4366 * @param pvEDX Where to store edx.
4367 */
4368void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4369{
4370 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4371}
4372
4373
4374#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): this is dead code (enclosed in #if 0). The single-argument
 * REMR3StateBack and PVM-taking EMR3FatalError calls below predate the
 * current signatures used by cpu_abort/remAbort -- update if ever revived.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4402#endif
4403
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM    pVM;
    PVMCPU pVCpu;
    char   szMsg[256];

    /*
     * Bitch about it.
     */
    /* Force the log out undisabled and unbuffered so the message survives. */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* Pull up to six pointer-sized args off the va_list by counting '%'
       characters; good enough for the internal format strings passed here. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM   = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    /* EMR3FatalError is not expected to return (asserted below). */
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4464
4465
4466/**
4467 * Aborts the VM.
4468 *
4469 * @param rc VBox error code.
4470 * @param pszTip Hint about why/when this happend.
4471 */
4472void remAbort(int rc, const char *pszTip)
4473{
4474 PVM pVM;
4475 PVMCPU pVCpu;
4476
4477 /*
4478 * Bitch about it.
4479 */
4480 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4481 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4482
4483 /*
4484 * Jump back to where we entered the recompiler.
4485 */
4486 pVM = cpu_single_env->pVM;
4487 pVCpu = cpu_single_env->pVCpu;
4488 Assert(pVCpu);
4489
4490 if (pVM->rem.s.fInREM)
4491 REMR3StateBack(pVM, pVCpu);
4492
4493 EMR3FatalError(pVCpu, rc);
4494 AssertMsgFailed(("EMR3FatalError returned!\n"));
4495}
4496
4497
/**
 * Dumps a linux system call.
 *
 * Logs the syscall name (taken from the i386 syscall-number table below,
 * indexed by the guest's EAX) together with the usual argument registers.
 *
 * @param   pVCpu   VMCPU handle.
 */
void remR3DumpLnxSyscall(PVMCPU pVCpu)
{
    /* i386 Linux syscall names, indexed by syscall number (EAX). */
    static const char *apsz[] =
    {
        "sys_restart_syscall",	/* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",		/* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",	/* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",		/* 15 */
        "sys_lchown16",
        "sys_ni_syscall",	/* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",	/* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",		/* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",		/* 30 */
        "sys_ni_syscall",	/* old stty syscall holder */
        "sys_ni_syscall",	/* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",	/* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",		/* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",	/* old prof syscall holder */
        "sys_brk",		/* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",	/* 50 */
        "sys_acct",
        "sys_umount",	/* recycled never used phys() */
        "sys_ni_syscall",	/* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",		/* 55 */
        "sys_ni_syscall",	/* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",	/* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",		/* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",	/* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",	/* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",	/* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",	/* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink",	/* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",		/* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16",	/* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",	/* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",	/* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",	/* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",		/* 110 */
        "sys_vhangup",
        "sys_ni_syscall",	/* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",	/* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",		/* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect",	/* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",	/* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",	/* 130:	old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",		/* 135 */
        "sys_personality",
        "sys_ni_syscall",	/* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",	/* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",		/* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",		/* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",	/* 165 */
        "sys_vm86",
        "sys_ni_syscall",	/* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",	/* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",	/* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",	/* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",	/* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",	/* reserved for streams1 */
        "sys_ni_syscall",	/* reserved for streams2 */
        "sys_vfork",		/* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",	/* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",	/* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",	/* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",	/* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid",	/* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",	/* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",	/* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",	/* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",	/* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",	/* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",		/* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup",	/* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",	/* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",	/* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",		/* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",		/* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",	/* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"	/* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
    /* A switch with only a default case; presumably kept so specific
       syscalls can easily be given dedicated handling/logging later. */
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
                     CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
            break;

    }
}
4796
4797
4798/**
4799 * Dumps an OpenBSD system call.
4800 * @param pVCpu VMCPU handle.
4801 */
4802void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4803{
4804 static const char *apsz[] =
4805 {
4806 "SYS_syscall", //0
4807 "SYS_exit", //1
4808 "SYS_fork", //2
4809 "SYS_read", //3
4810 "SYS_write", //4
4811 "SYS_open", //5
4812 "SYS_close", //6
4813 "SYS_wait4", //7
4814 "SYS_8",
4815 "SYS_link", //9
4816 "SYS_unlink", //10
4817 "SYS_11",
4818 "SYS_chdir", //12
4819 "SYS_fchdir", //13
4820 "SYS_mknod", //14
4821 "SYS_chmod", //15
4822 "SYS_chown", //16
4823 "SYS_break", //17
4824 "SYS_18",
4825 "SYS_19",
4826 "SYS_getpid", //20
4827 "SYS_mount", //21
4828 "SYS_unmount", //22
4829 "SYS_setuid", //23
4830 "SYS_getuid", //24
4831 "SYS_geteuid", //25
4832 "SYS_ptrace", //26
4833 "SYS_recvmsg", //27
4834 "SYS_sendmsg", //28
4835 "SYS_recvfrom", //29
4836 "SYS_accept", //30
4837 "SYS_getpeername", //31
4838 "SYS_getsockname", //32
4839 "SYS_access", //33
4840 "SYS_chflags", //34
4841 "SYS_fchflags", //35
4842 "SYS_sync", //36
4843 "SYS_kill", //37
4844 "SYS_38",
4845 "SYS_getppid", //39
4846 "SYS_40",
4847 "SYS_dup", //41
4848 "SYS_opipe", //42
4849 "SYS_getegid", //43
4850 "SYS_profil", //44
4851 "SYS_ktrace", //45
4852 "SYS_sigaction", //46
4853 "SYS_getgid", //47
4854 "SYS_sigprocmask", //48
4855 "SYS_getlogin", //49
4856 "SYS_setlogin", //50
4857 "SYS_acct", //51
4858 "SYS_sigpending", //52
4859 "SYS_osigaltstack", //53
4860 "SYS_ioctl", //54
4861 "SYS_reboot", //55
4862 "SYS_revoke", //56
4863 "SYS_symlink", //57
4864 "SYS_readlink", //58
4865 "SYS_execve", //59
4866 "SYS_umask", //60
4867 "SYS_chroot", //61
4868 "SYS_62",
4869 "SYS_63",
4870 "SYS_64",
4871 "SYS_65",
4872 "SYS_vfork", //66
4873 "SYS_67",
4874 "SYS_68",
4875 "SYS_sbrk", //69
4876 "SYS_sstk", //70
4877 "SYS_61",
4878 "SYS_vadvise", //72
4879 "SYS_munmap", //73
4880 "SYS_mprotect", //74
4881 "SYS_madvise", //75
4882 "SYS_76",
4883 "SYS_77",
4884 "SYS_mincore", //78
4885 "SYS_getgroups", //79
4886 "SYS_setgroups", //80
4887 "SYS_getpgrp", //81
4888 "SYS_setpgid", //82
4889 "SYS_setitimer", //83
4890 "SYS_84",
4891 "SYS_85",
4892 "SYS_getitimer", //86
4893 "SYS_87",
4894 "SYS_88",
4895 "SYS_89",
4896 "SYS_dup2", //90
4897 "SYS_91",
4898 "SYS_fcntl", //92
4899 "SYS_select", //93
4900 "SYS_94",
4901 "SYS_fsync", //95
4902 "SYS_setpriority", //96
4903 "SYS_socket", //97
4904 "SYS_connect", //98
4905 "SYS_99",
4906 "SYS_getpriority", //100
4907 "SYS_101",
4908 "SYS_102",
4909 "SYS_sigreturn", //103
4910 "SYS_bind", //104
4911 "SYS_setsockopt", //105
4912 "SYS_listen", //106
4913 "SYS_107",
4914 "SYS_108",
4915 "SYS_109",
4916 "SYS_110",
4917 "SYS_sigsuspend", //111
4918 "SYS_112",
4919 "SYS_113",
4920 "SYS_114",
4921 "SYS_115",
4922 "SYS_gettimeofday", //116
4923 "SYS_getrusage", //117
4924 "SYS_getsockopt", //118
4925 "SYS_119",
4926 "SYS_readv", //120
4927 "SYS_writev", //121
4928 "SYS_settimeofday", //122
4929 "SYS_fchown", //123
4930 "SYS_fchmod", //124
4931 "SYS_125",
4932 "SYS_setreuid", //126
4933 "SYS_setregid", //127
4934 "SYS_rename", //128
4935 "SYS_129",
4936 "SYS_130",
4937 "SYS_flock", //131
4938 "SYS_mkfifo", //132
4939 "SYS_sendto", //133
4940 "SYS_shutdown", //134
4941 "SYS_socketpair", //135
4942 "SYS_mkdir", //136
4943 "SYS_rmdir", //137
4944 "SYS_utimes", //138
4945 "SYS_139",
4946 "SYS_adjtime", //140
4947 "SYS_141",
4948 "SYS_142",
4949 "SYS_143",
4950 "SYS_144",
4951 "SYS_145",
4952 "SYS_146",
4953 "SYS_setsid", //147
4954 "SYS_quotactl", //148
4955 "SYS_149",
4956 "SYS_150",
4957 "SYS_151",
4958 "SYS_152",
4959 "SYS_153",
4960 "SYS_154",
4961 "SYS_nfssvc", //155
4962 "SYS_156",
4963 "SYS_157",
4964 "SYS_158",
4965 "SYS_159",
4966 "SYS_160",
4967 "SYS_getfh", //161
4968 "SYS_162",
4969 "SYS_163",
4970 "SYS_164",
4971 "SYS_sysarch", //165
4972 "SYS_166",
4973 "SYS_167",
4974 "SYS_168",
4975 "SYS_169",
4976 "SYS_170",
4977 "SYS_171",
4978 "SYS_172",
4979 "SYS_pread", //173
4980 "SYS_pwrite", //174
4981 "SYS_175",
4982 "SYS_176",
4983 "SYS_177",
4984 "SYS_178",
4985 "SYS_179",
4986 "SYS_180",
4987 "SYS_setgid", //181
4988 "SYS_setegid", //182
4989 "SYS_seteuid", //183
4990 "SYS_lfs_bmapv", //184
4991 "SYS_lfs_markv", //185
4992 "SYS_lfs_segclean", //186
4993 "SYS_lfs_segwait", //187
4994 "SYS_188",
4995 "SYS_189",
4996 "SYS_190",
4997 "SYS_pathconf", //191
4998 "SYS_fpathconf", //192
4999 "SYS_swapctl", //193
5000 "SYS_getrlimit", //194
5001 "SYS_setrlimit", //195
5002 "SYS_getdirentries", //196
5003 "SYS_mmap", //197
5004 "SYS___syscall", //198
5005 "SYS_lseek", //199
5006 "SYS_truncate", //200
5007 "SYS_ftruncate", //201
5008 "SYS___sysctl", //202
5009 "SYS_mlock", //203
5010 "SYS_munlock", //204
5011 "SYS_205",
5012 "SYS_futimes", //206
5013 "SYS_getpgid", //207
5014 "SYS_xfspioctl", //208
5015 "SYS_209",
5016 "SYS_210",
5017 "SYS_211",
5018 "SYS_212",
5019 "SYS_213",
5020 "SYS_214",
5021 "SYS_215",
5022 "SYS_216",
5023 "SYS_217",
5024 "SYS_218",
5025 "SYS_219",
5026 "SYS_220",
5027 "SYS_semget", //221
5028 "SYS_222",
5029 "SYS_223",
5030 "SYS_224",
5031 "SYS_msgget", //225
5032 "SYS_msgsnd", //226
5033 "SYS_msgrcv", //227
5034 "SYS_shmat", //228
5035 "SYS_229",
5036 "SYS_shmdt", //230
5037 "SYS_231",
5038 "SYS_clock_gettime", //232
5039 "SYS_clock_settime", //233
5040 "SYS_clock_getres", //234
5041 "SYS_235",
5042 "SYS_236",
5043 "SYS_237",
5044 "SYS_238",
5045 "SYS_239",
5046 "SYS_nanosleep", //240
5047 "SYS_241",
5048 "SYS_242",
5049 "SYS_243",
5050 "SYS_244",
5051 "SYS_245",
5052 "SYS_246",
5053 "SYS_247",
5054 "SYS_248",
5055 "SYS_249",
5056 "SYS_minherit", //250
5057 "SYS_rfork", //251
5058 "SYS_poll", //252
5059 "SYS_issetugid", //253
5060 "SYS_lchown", //254
5061 "SYS_getsid", //255
5062 "SYS_msync", //256
5063 "SYS_257",
5064 "SYS_258",
5065 "SYS_259",
5066 "SYS_getfsstat", //260
5067 "SYS_statfs", //261
5068 "SYS_fstatfs", //262
5069 "SYS_pipe", //263
5070 "SYS_fhopen", //264
5071 "SYS_265",
5072 "SYS_fhstatfs", //266
5073 "SYS_preadv", //267
5074 "SYS_pwritev", //268
5075 "SYS_kqueue", //269
5076 "SYS_kevent", //270
5077 "SYS_mlockall", //271
5078 "SYS_munlockall", //272
5079 "SYS_getpeereid", //273
5080 "SYS_274",
5081 "SYS_275",
5082 "SYS_276",
5083 "SYS_277",
5084 "SYS_278",
5085 "SYS_279",
5086 "SYS_280",
5087 "SYS_getresuid", //281
5088 "SYS_setresuid", //282
5089 "SYS_getresgid", //283
5090 "SYS_setresgid", //284
5091 "SYS_285",
5092 "SYS_mquery", //286
5093 "SYS_closefrom", //287
5094 "SYS_sigaltstack", //288
5095 "SYS_shmget", //289
5096 "SYS_semop", //290
5097 "SYS_stat", //291
5098 "SYS_fstat", //292
5099 "SYS_lstat", //293
5100 "SYS_fhstat", //294
5101 "SYS___semctl", //295
5102 "SYS_shmctl", //296
5103 "SYS_msgctl", //297
5104 "SYS_MAXSYSCALL", //298
5105 //299
5106 //300
5107 };
5108 uint32_t uEAX;
5109 if (!LogIsEnabled())
5110 return;
5111 uEAX = CPUMGetGuestEAX(pVCpu);
5112 switch (uEAX)
5113 {
5114 default:
5115 if (uEAX < RT_ELEMENTS(apsz))
5116 {
5117 uint32_t au32Args[8] = {0};
5118 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5119 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5120 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5121 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5122 }
5123 else
5124 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5125 break;
5126 }
5127}
5128
5129
5130#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5131/**
5132 * The Dll main entry point (stub).
5133 */
5134bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5135{
5136 return true;
5137}
5138
/**
 * Minimal memcpy replacement for the no-CRT windows build.
 *
 * Plain byte-by-byte forward copy; performance is irrelevant for this
 * stub, freedom from CRT dependencies is the point.  As with the real
 * memcpy, the buffers must not overlap.
 *
 * @returns dst.
 * @param   dst     Pointer to the destination buffer.
 * @param   src     Pointer to the source buffer; must not overlap dst.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src; /* const-qualified: don't discard the qualifier from src. */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5146
5147#endif
5148
5149void cpu_smm_update(CPUState *env)
5150{
5151}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette