VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 29331

Last change on this file since 29331 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 169.0 KB
Line 
1/* $Id: VBoxRecompiler.c 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include "vl.h"
24#include "osdep.h"
25#include "exec-all.h"
26#include "config.h"
27#include "cpu-all.h"
28
29#include <VBox/rem.h>
30#include <VBox/vmapi.h>
31#include <VBox/tm.h>
32#include <VBox/ssm.h>
33#include <VBox/em.h>
34#include <VBox/trpm.h>
35#include <VBox/iom.h>
36#include <VBox/mm.h>
37#include <VBox/pgm.h>
38#include <VBox/pdm.h>
39#include <VBox/dbgf.h>
40#include <VBox/dbg.h>
41#include <VBox/hwaccm.h>
42#include <VBox/patm.h>
43#include <VBox/csam.h>
44#include "REMInternal.h"
45#include <VBox/vm.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49#include <VBox/log.h>
50#include <iprt/semaphore.h>
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/thread.h>
54#include <iprt/string.h>
55
56/* Don't wanna include everything. */
57extern void cpu_exec_init_all(unsigned long tb_size);
58extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
59extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
60extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
61extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
62extern void tlb_flush(CPUState *env, int flush_global);
63extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
64extern void sync_ldtr(CPUX86State *env1, int selector);
65
66#ifdef VBOX_STRICT
67unsigned long get_phys_page_offset(target_ulong addr);
68#endif
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Each argument is evaluated exactly once (a single struct assignment),
 * and the do/while(0) wrapper makes the macro behave as one statement.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
80
81
82/*******************************************************************************
83* Internal Functions *
84*******************************************************************************/
85static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
88static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
89
90static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
91static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
92static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
93static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
94static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
95static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
96
97static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
98static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
99static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
100static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
101static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
102static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
103
104static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
105static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
106static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
107
108/*******************************************************************************
109* Global Variables *
110*******************************************************************************/
111
/** @todo Move stats to REM::s some rainy day when we have nothing to do. */
113#ifdef VBOX_WITH_STATISTICS
114static STAMPROFILEADV gStatExecuteSingleInstr;
115static STAMPROFILEADV gStatCompilationQEmu;
116static STAMPROFILEADV gStatRunCodeQEmu;
117static STAMPROFILEADV gStatTotalTimeQEmu;
118static STAMPROFILEADV gStatTimers;
119static STAMPROFILEADV gStatTBLookup;
120static STAMPROFILEADV gStatIRQ;
121static STAMPROFILEADV gStatRawCheck;
122static STAMPROFILEADV gStatMemRead;
123static STAMPROFILEADV gStatMemWrite;
124static STAMPROFILE gStatGCPhys2HCVirt;
125static STAMPROFILE gStatHCVirt2GCPhys;
126static STAMCOUNTER gStatCpuGetTSC;
127static STAMCOUNTER gStatRefuseTFInhibit;
128static STAMCOUNTER gStatRefuseVM86;
129static STAMCOUNTER gStatRefusePaging;
130static STAMCOUNTER gStatRefusePAE;
131static STAMCOUNTER gStatRefuseIOPLNot0;
132static STAMCOUNTER gStatRefuseIF0;
133static STAMCOUNTER gStatRefuseCode16;
134static STAMCOUNTER gStatRefuseWP0;
135static STAMCOUNTER gStatRefuseRing1or2;
136static STAMCOUNTER gStatRefuseCanExecute;
137static STAMCOUNTER gStatREMGDTChange;
138static STAMCOUNTER gStatREMIDTChange;
139static STAMCOUNTER gStatREMLDTRChange;
140static STAMCOUNTER gStatREMTRChange;
141static STAMCOUNTER gStatSelOutOfSync[6];
142static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
143static STAMCOUNTER gStatFlushTBs;
144#endif
145/* in exec.c */
146extern uint32_t tlb_flush_count;
147extern uint32_t tb_flush_count;
148extern uint32_t tb_phys_invalidate_count;
149
150/*
151 * Global stuff.
152 */
153
/** MMIO read callbacks, indexed by access-size log2 (0=U8, 1=U16, 2=U32).
 *  Registered with cpu_register_io_memory() in REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks, same size indexing as the read table. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Access-handler read callbacks, same size indexing as the read table. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Access-handler write callbacks, same size indexing as the read table. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
185
186
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments (at most one optional boolean/mnemonic). */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags,                         pszName,        pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,                              "on/off",       "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. Registered once by REMR3Init via DBGCRegisterCommands. */
static const DBGCCMD    g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
218
219/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
220uint8_t *code_gen_prologue;
221
222
223/*******************************************************************************
224* Internal Functions *
225*******************************************************************************/
226void remAbort(int rc, const char *pszTip);
227extern int testmath(void);
228
229/* Put them here to avoid unused variable warning. */
230AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
231#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
232//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
233/* Why did this have to be identical?? */
234AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
235#else
236AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
237#endif
238
239
240/**
241 * Initializes the REM.
242 *
243 * @returns VBox status code.
244 * @param pVM The VM to operate on.
245 */
246REMR3DECL(int) REMR3Init(PVM pVM)
247{
248 PREMHANDLERNOTIFICATION pCur;
249 uint32_t u32Dummy;
250 int rc;
251 unsigned i;
252
253#ifdef VBOX_ENABLE_VBOXREM64
254 LogRel(("Using 64-bit aware REM\n"));
255#endif
256
257 /*
258 * Assert sanity.
259 */
260 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
261 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
262 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
263#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
264 Assert(!testmath());
265#endif
266
267 /*
268 * Init some internal data members.
269 */
270 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
271 pVM->rem.s.Env.pVM = pVM;
272#ifdef CPU_RAW_MODE_INIT
273 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
274#endif
275
276 /*
277 * Initialize the REM critical section.
278 *
279 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
280 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
281 * deadlocks. (mostly pgm vs rem locking)
282 */
283 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
284 AssertRCReturn(rc, rc);
285
286 /* ctx. */
287 pVM->rem.s.pCtx = NULL; /* set when executing code. */
288 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
289
290 /* ignore all notifications */
291 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
292
293 code_gen_prologue = RTMemExecAlloc(_1K);
294 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
295
296 cpu_exec_init_all(0);
297
298 /*
299 * Init the recompiler.
300 */
301 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
302 {
303 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
304 return VERR_GENERAL_FAILURE;
305 }
306 PVMCPU pVCpu = VMMGetCpu(pVM);
307 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
308 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
309
310 /* allocate code buffer for single instruction emulation. */
311 pVM->rem.s.Env.cbCodeBuffer = 4096;
312 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
313 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
314
315 /* finally, set the cpu_single_env global. */
316 cpu_single_env = &pVM->rem.s.Env;
317
318 /* Nothing is pending by default */
319 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
320
321 /*
322 * Register ram types.
323 */
324 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
325 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
326 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
327 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
328 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
329
330 /* stop ignoring. */
331 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
332
333 /*
334 * Register the saved state data unit.
335 */
336 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
337 NULL, NULL, NULL,
338 NULL, remR3Save, NULL,
339 NULL, remR3Load, NULL);
340 if (RT_FAILURE(rc))
341 return rc;
342
343#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
344 /*
345 * Debugger commands.
346 */
347 static bool fRegisteredCmds = false;
348 if (!fRegisteredCmds)
349 {
350 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
351 if (RT_SUCCESS(rc))
352 fRegisteredCmds = true;
353 }
354#endif
355
356#ifdef VBOX_WITH_STATISTICS
357 /*
358 * Statistics.
359 */
360 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
361 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
362 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
363 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
364 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
365 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
366 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
367 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
368 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
369 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
370 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
371 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
372
373 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
374
375 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
376 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
377 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
378 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
379 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
380 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
381 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
382 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
383 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
384 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
385 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
386
387 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
388 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
389 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
390 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
391
392 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
393 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
394 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
395 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
396 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
397 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
398
399 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
400 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
401 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
402 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
403 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
404 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
405
406 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
407#endif /* VBOX_WITH_STATISTICS */
408
409 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
410 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
411 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
412
413
414#ifdef DEBUG_ALL_LOGGING
415 loglevel = ~0;
416# ifdef DEBUG_TMP_LOGGING
417 logfile = fopen("/tmp/vbox-qemu.log", "w");
418# endif
419#endif
420
421 /*
422 * Init the handler notification lists.
423 */
424 pVM->rem.s.idxPendingList = UINT32_MAX;
425 pVM->rem.s.idxFreeList = 0;
426
427 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
428 {
429 pCur = &pVM->rem.s.aHandlerNotifications[i];
430 pCur->idxNext = i + 1;
431 pCur->idxSelf = i;
432 }
433 pCur->idxNext = UINT32_MAX; /* the last record. */
434
435 return rc;
436}
437
438
439/**
440 * Finalizes the REM initialization.
441 *
442 * This is called after all components, devices and drivers has
443 * been initialized. Its main purpose it to finish the RAM related
444 * initialization.
445 *
446 * @returns VBox status code.
447 *
448 * @param pVM The VM handle.
449 */
450REMR3DECL(int) REMR3InitFinalize(PVM pVM)
451{
452 int rc;
453
454 /*
455 * Ram size & dirty bit map.
456 */
457 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
458 pVM->rem.s.fGCPhysLastRamFixed = true;
459#ifdef RT_STRICT
460 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
461#else
462 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
463#endif
464 return rc;
465}
466
467
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map with inaccessible tail pages.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* RAM spans [0, GCPhysLastRam]; +1 gives the byte size and must not wrap. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty byte per page; assert the shift dropped no bits (page-aligned size). */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Revoke access to the tail so any overrun of the bitmap faults immediately. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map pointer so its logical end butts up against the guard area. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it - all pages initially dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
522
523
524/**
525 * Terminates the REM.
526 *
527 * Termination means cleaning up and freeing all resources,
528 * the VM it self is at this point powered off or suspended.
529 *
530 * @returns VBox status code.
531 * @param pVM The VM to operate on.
532 */
533REMR3DECL(int) REMR3Term(PVM pVM)
534{
535#ifdef VBOX_WITH_STATISTICS
536 /*
537 * Statistics.
538 */
539 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
540 STAM_DEREG(pVM, &gStatCompilationQEmu);
541 STAM_DEREG(pVM, &gStatRunCodeQEmu);
542 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
543 STAM_DEREG(pVM, &gStatTimers);
544 STAM_DEREG(pVM, &gStatTBLookup);
545 STAM_DEREG(pVM, &gStatIRQ);
546 STAM_DEREG(pVM, &gStatRawCheck);
547 STAM_DEREG(pVM, &gStatMemRead);
548 STAM_DEREG(pVM, &gStatMemWrite);
549 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
550 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
551
552 STAM_DEREG(pVM, &gStatCpuGetTSC);
553
554 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
555 STAM_DEREG(pVM, &gStatRefuseVM86);
556 STAM_DEREG(pVM, &gStatRefusePaging);
557 STAM_DEREG(pVM, &gStatRefusePAE);
558 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
559 STAM_DEREG(pVM, &gStatRefuseIF0);
560 STAM_DEREG(pVM, &gStatRefuseCode16);
561 STAM_DEREG(pVM, &gStatRefuseWP0);
562 STAM_DEREG(pVM, &gStatRefuseRing1or2);
563 STAM_DEREG(pVM, &gStatRefuseCanExecute);
564 STAM_DEREG(pVM, &gStatFlushTBs);
565
566 STAM_DEREG(pVM, &gStatREMGDTChange);
567 STAM_DEREG(pVM, &gStatREMLDTRChange);
568 STAM_DEREG(pVM, &gStatREMIDTChange);
569 STAM_DEREG(pVM, &gStatREMTRChange);
570
571 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
572 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
573 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
574 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
575 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
576 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
577
578 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
579 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
580 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
581 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
582 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
583 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
584
585 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
586#endif /* VBOX_WITH_STATISTICS */
587
588 STAM_REL_DEREG(pVM, &tb_flush_count);
589 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
590 STAM_REL_DEREG(pVM, &tlb_flush_count);
591
592 return VINF_SUCCESS;
593}
594
595
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu, suppressing any notifications triggered by
     * cpu_reset() via the cIgnoreAll counter (must be balanced: 0 on entry
     * and exit).
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
622
623
624/**
625 * Execute state save operation.
626 *
627 * @returns VBox status code.
628 * @param pVM VM Handle.
629 * @param pSSM SSM operation handle.
630 */
631static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
632{
633 PREM pRem = &pVM->rem.s;
634
635 /*
636 * Save the required CPU Env bits.
637 * (Not much because we're never in REM when doing the save.)
638 */
639 LogFlow(("remR3Save:\n"));
640 Assert(!pRem->fInREM);
641 SSMR3PutU32(pSSM, pRem->Env.hflags);
642 SSMR3PutU32(pSSM, ~0); /* separator */
643
644 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
645 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
646 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
647
648 return SSMR3PutU32(pSSM, ~0); /* terminator */
649}
650
651
/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version. Only the current version and the 1.6 layout are accepted.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM,  &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     * NOTE(review): unlike REMR3Init, cpuid_ext3_features is not refreshed
     * here (the ECX output of leaf 0x80000001 is discarded) - confirm intentional.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Flush the TLB after loading the state.
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
782
783
784
785#undef LOG_GROUP
786#define LOG_GROUP LOG_GROUP_REM_RUN
787
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.  (interrupt_request is restored
     * verbatim before returning.)
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (fBp records whether one was actually removed so it can be re-armed below.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Step completed; bump the timers so virtual time moves. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status was stashed in rem.s.rc by an inner handler; consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-arm the breakpoint we temporarily removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
872
873
874/**
875 * Set a breakpoint using the REM facilities.
876 *
877 * @returns VBox status code.
878 * @param pVM The VM handle.
879 * @param Address The breakpoint address.
880 * @thread The emulation thread.
881 */
882REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
883{
884 VM_ASSERT_EMT(pVM);
885 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
886 {
887 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
888 return VINF_SUCCESS;
889 }
890 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
891 return VERR_REM_NO_MORE_BP_SLOTS;
892}
893
894
895/**
896 * Clears a breakpoint set by REMR3BreakpointSet().
897 *
898 * @returns VBox status code.
899 * @param pVM The VM handle.
900 * @param Address The breakpoint address.
901 * @thread The emulation thread.
902 */
903REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
904{
905 VM_ASSERT_EMT(pVM);
906 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
907 {
908 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
909 return VINF_SUCCESS;
910 }
911 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
912 return VERR_REM_BP_NOT_FOUND;
913}
914
915
916/**
917 * Emulate an instruction.
918 *
919 * This function executes one instruction without letting anyone
920 * interrupt it. This is intended for being called while being in
921 * raw mode and thus will take care of all the state syncing between
922 * REM and the rest.
923 *
924 * @returns VBox status code.
925 * @param pVM VM handle.
926 * @param pVCpu VMCPU Handle.
927 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Save the pending interrupt requests; restored after the switch
           below before handing the state back to CPUM. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        /* Map the QEMU exit code onto a VBox status code. */
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the breakpoint table for
                   the current PC to tell the two apart. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1064
1065
1066/**
1067 * Runs code in recompiled mode.
1068 *
1069 * Before calling this function the REM state needs to be in sync with
1070 * the VM. Call REMR3State() to perform the sync. It's only necessary
1071 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1072 * and after calling REMR3StateBack().
1073 *
1074 * @returns VBox status code.
1075 *
1076 * @param pVM VM Handle.
1077 * @param pVCpu VMCPU Handle.
1078 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    /* Run the recompiler, bracketed by TM notifications so timing stays honest. */
    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    /* Map the QEMU exit code onto a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            /* Disabled developer scaffolding; intentionally left as-is. */
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Scan the breakpoint table for the
               current PC to tell the two apart. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1194
1195
1196/**
1197 * Check if the cpu state is suitable for Raw execution.
1198 *
1199 * @returns boolean
1200 * @param env The CPU env struct.
1201 * @param eip The EIP to check this for (might differ from env->eip).
1202 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1203 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1204 *
1205 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1206 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest
         */
        /* Only the fields HWACCMR3CanExecuteGuest inspects are filled in;
           the rest of Ctx stays uninitialized on purpose. */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
        Ctx.rip = env->eip;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires both paging and protected mode to be enabled. */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        /* PAE is only acceptable when the (virtual) CPU advertises it. */
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code: requires raw ring-3 execution to be enabled and IF set. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Supervisor code: only flat 32-bit ring-0 with CR0.WP set qualifies. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always forced into raw mode, bypassing the IF check below. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1430
1431
1432/**
1433 * Fetches a code byte.
1434 *
1435 * @returns Success indicator (bool) for ease of use.
1436 * @param env The CPU environment structure.
1437 * @param GCPtrInstr Where to fetch code.
1438 * @param pu8Byte Where to store the byte on success
1439 */
1440bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1441{
1442 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1443 if (RT_SUCCESS(rc))
1444 return true;
1445 return false;
1446}
1447
1448
1449/**
1450 * Flush (or invalidate if you like) page table/dir entry.
1451 *
1452 * (invlpg instruction; tlb_flush_page)
1453 *
1454 * @param env Pointer to cpu environment.
1455 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1456 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A change of CR4.VME flags the TSS for resyncing (same stanza appears
       in remR3FlushTLB and remR3ChangeCpuMode). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Invalidation failed: request a full CR3 sync instead. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1497
1498
1499#ifndef REM_PHYS_ADDR_IN_TLB
1500/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    /* NOTE(review): the fWritable parameter is not forwarded - the lookup
       always requests a writable mapping and write-monitored pages are
       signalled via the status code instead. Confirm this is intentional. */
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert( rc == VINF_SUCCESS
        || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
        || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
        || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    /* Failure is reported as the special pointer value 1; a write-monitored
       page gets bit 1 set in the returned pointer so writes can be caught. */
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* !REM_PHYS_ADDR_IN_TLB */
1521
1522
1523/**
1524 * Called from tlb_protect_code in order to write monitor a code page.
1525 *
1526 * @param env Pointer to the CPU environment.
1527 * @param GCPtr Code page to monitor
1528 */
1529void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1530{
1531#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1532 Assert(env->pVM->rem.s.fInREM);
1533 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1534 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1535 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1536 && !(env->eflags & VM_MASK) /* no V86 mode */
1537 && !HWACCMIsEnabled(env->pVM))
1538 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1539#endif
1540}
1541
1542
1543/**
1544 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1545 *
1546 * @param env Pointer to the CPU environment.
1547 * @param GCPtr Code page to monitor
1548 */
1549void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1550{
1551 Assert(env->pVM->rem.s.fInREM);
1552#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1553 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1554 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1555 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1556 && !(env->eflags & VM_MASK) /* no V86 mode */
1557 && !HWACCMIsEnabled(env->pVM))
1558 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1559#endif
1560}
1561
1562
1563/**
1564 * Called when the CPU is initialized, any of the CRx registers are changed or
1565 * when the A20 line is modified.
1566 *
1567 * @param env Pointer to the CPU environment.
1568 * @param fGlobal Set if the flush is global.
1569 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     */
    /* Without global pages (CR4.PGE clear) every flush is effectively global. */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A change of CR4.VME flags the TSS for resyncing (same stanza appears
       in remR3FlushPage and remR3ChangeCpuMode). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
}
1607
1608
1609/**
1610 * Called when any of the cr0, cr4 or efer registers is updated.
1611 *
1612 * @param env Pointer to the CPU environment.
1613 */
void remR3ChangeCpuMode(CPUState *env)
{
    PVM pVM = env->pVM;
    uint64_t efer;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A change of CR4.VME flags the TSS for resyncing (same stanza appears
       in remR3FlushPage and remR3FlushTLB). */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    efer = env->efer;
#else
    /* No EFER on a 32-bit-only target; treat it as zero. */
    efer = 0;
#endif
    Assert(env->pVCpu);
    rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
    if (rc != VINF_SUCCESS)
    {
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* Informational EM status codes are propagated via remR3RaiseRC;
               anything else is fatal and aborts the CPU. */
            Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
            remR3RaiseRC(env->pVM, rc);
        }
        else
            cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
    }
}
1659
1660
1661/**
1662 * Called from compiled code to run dma.
1663 *
1664 * @param env Pointer to the CPU environment.
1665 */
void remR3DmaRun(CPUState *env)
{
    /* Bracket the DMA work with profiling stop/start so the time spent in
       PDM isn't accounted to emulated-code execution. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1672
1673
1674/**
1675 * Called from compiled code to schedule pending timers in VMM
1676 *
1677 * @param env Pointer to the CPU environment.
1678 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
    /* Re-attribute the time from emulated-code to the timer-run bucket while
       the TM queues are being serviced. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1689
1690
1691/**
 * Record trap occurrence
1693 *
1694 * @returns VBox status code
1695 * @param env Pointer to the CPU environment.
1696 * @param uTrap Trap nr
1697 * @param uErrorCode Error code
1698 * @param pvNextEIP Next EIP
1699 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Lazily registered per-trap-vector counters (vectors 0..254). */
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* CPU exceptions (vector < 0x20) in protected, non-V86 mode are tracked
       for loop detection; everything else just overwrites the pending state. */
    if( uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same trap repeating more than 512 times at the same EIP/CR2 means
           we're stuck in a trap loop; bail out with VERR_REM_TOO_MANY_TRAPS. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap or a different location restarts the count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    else
    {
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    return VINF_SUCCESS;
}
1750
1751
1752/*
1753 * Clear current active trap
1754 *
1755 * @param pVM VM Handle.
1756 */
1757void remR3TrapClear(PVM pVM)
1758{
1759 pVM->rem.s.cPendingExceptions = 0;
1760 pVM->rem.s.uPendingException = 0;
1761 pVM->rem.s.uPendingExcptEIP = 0;
1762 pVM->rem.s.uPendingExcptCR2 = 0;
1763}
1764
1765
1766/*
1767 * Record previous call instruction addresses
1768 *
1769 * @param env Pointer to the CPU environment.
1770 */
void remR3RecordCall(CPUState *env)
{
    /* Hand the current EIP to CSAM so it can record the call site. */
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1775
1776
1777/**
1778 * Syncs the internal REM state with the VM.
1779 *
1780 * This must be called before REMR3Run() is invoked whenever when the REM
1781 * state is not up to date. Calling it several times in a row is not
1782 * permitted.
1783 *
1784 * @returns VBox status code.
1785 *
1786 * @param pVM VM Handle.
1787 * @param pVCpu VMCPU Handle.
1788 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
1792 */
1793REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1794{
1795 register const CPUMCTX *pCtx;
1796 register unsigned fFlags;
1797 bool fHiddenSelRegsValid;
1798 unsigned i;
1799 TRPMEVENT enmType;
1800 uint8_t u8TrapNo;
1801 int rc;
1802
1803 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1804 Log2(("REMR3State:\n"));
1805
1806 pVM->rem.s.Env.pVCpu = pVCpu;
1807 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1808 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1809
1810 Assert(!pVM->rem.s.fInREM);
1811 pVM->rem.s.fInStateSync = true;
1812
1813 /*
1814 * If we have to flush TBs, do that immediately.
1815 */
1816 if (pVM->rem.s.fFlushTBs)
1817 {
1818 STAM_COUNTER_INC(&gStatFlushTBs);
1819 tb_flush(&pVM->rem.s.Env);
1820 pVM->rem.s.fFlushTBs = false;
1821 }
1822
1823 /*
1824 * Copy the registers which require no special handling.
1825 */
1826#ifdef TARGET_X86_64
1827 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1828 Assert(R_EAX == 0);
1829 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1830 Assert(R_ECX == 1);
1831 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1832 Assert(R_EDX == 2);
1833 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1834 Assert(R_EBX == 3);
1835 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1836 Assert(R_ESP == 4);
1837 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1838 Assert(R_EBP == 5);
1839 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1840 Assert(R_ESI == 6);
1841 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1842 Assert(R_EDI == 7);
1843 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1844 pVM->rem.s.Env.regs[8] = pCtx->r8;
1845 pVM->rem.s.Env.regs[9] = pCtx->r9;
1846 pVM->rem.s.Env.regs[10] = pCtx->r10;
1847 pVM->rem.s.Env.regs[11] = pCtx->r11;
1848 pVM->rem.s.Env.regs[12] = pCtx->r12;
1849 pVM->rem.s.Env.regs[13] = pCtx->r13;
1850 pVM->rem.s.Env.regs[14] = pCtx->r14;
1851 pVM->rem.s.Env.regs[15] = pCtx->r15;
1852
1853 pVM->rem.s.Env.eip = pCtx->rip;
1854
1855 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1856#else
1857 Assert(R_EAX == 0);
1858 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1859 Assert(R_ECX == 1);
1860 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1861 Assert(R_EDX == 2);
1862 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1863 Assert(R_EBX == 3);
1864 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1865 Assert(R_ESP == 4);
1866 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1867 Assert(R_EBP == 5);
1868 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1869 Assert(R_ESI == 6);
1870 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1871 Assert(R_EDI == 7);
1872 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1873 pVM->rem.s.Env.eip = pCtx->eip;
1874
1875 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1876#endif
1877
1878 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1879
1880 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1881 for (i=0;i<8;i++)
1882 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1883
1884 /*
1885 * Clear the halted hidden flag (the interrupt waking up the CPU can
1886 * have been dispatched in raw mode).
1887 */
1888 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1889
1890 /*
1891 * Replay invlpg?
1892 */
1893 if (pVM->rem.s.cInvalidatedPages)
1894 {
1895 RTUINT i;
1896
1897 pVM->rem.s.fIgnoreInvlPg = true;
1898 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1899 {
1900 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1901 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1902 }
1903 pVM->rem.s.fIgnoreInvlPg = false;
1904 pVM->rem.s.cInvalidatedPages = 0;
1905 }
1906
1907 /* Replay notification changes. */
1908 REMR3ReplayHandlerNotifications(pVM);
1909
1910 /* Update MSRs; before CRx registers! */
1911 pVM->rem.s.Env.efer = pCtx->msrEFER;
1912 pVM->rem.s.Env.star = pCtx->msrSTAR;
1913 pVM->rem.s.Env.pat = pCtx->msrPAT;
1914#ifdef TARGET_X86_64
1915 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1916 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1917 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1918 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1919
1920 /* Update the internal long mode activate flag according to the new EFER value. */
1921 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1922 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1923 else
1924 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1925#endif
1926
1927 /*
1928 * Registers which are rarely changed and require special handling / order when changed.
1929 */
1930 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1931 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1932 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1933 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1934 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1935 {
1936 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1937 {
1938 pVM->rem.s.fIgnoreCR3Load = true;
1939 tlb_flush(&pVM->rem.s.Env, true);
1940 pVM->rem.s.fIgnoreCR3Load = false;
1941 }
1942
1943 /* CR4 before CR0! */
1944 if (fFlags & CPUM_CHANGED_CR4)
1945 {
1946 pVM->rem.s.fIgnoreCR3Load = true;
1947 pVM->rem.s.fIgnoreCpuMode = true;
1948 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1949 pVM->rem.s.fIgnoreCpuMode = false;
1950 pVM->rem.s.fIgnoreCR3Load = false;
1951 }
1952
1953 if (fFlags & CPUM_CHANGED_CR0)
1954 {
1955 pVM->rem.s.fIgnoreCR3Load = true;
1956 pVM->rem.s.fIgnoreCpuMode = true;
1957 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1958 pVM->rem.s.fIgnoreCpuMode = false;
1959 pVM->rem.s.fIgnoreCR3Load = false;
1960 }
1961
1962 if (fFlags & CPUM_CHANGED_CR3)
1963 {
1964 pVM->rem.s.fIgnoreCR3Load = true;
1965 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1966 pVM->rem.s.fIgnoreCR3Load = false;
1967 }
1968
1969 if (fFlags & CPUM_CHANGED_GDTR)
1970 {
1971 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1972 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1973 }
1974
1975 if (fFlags & CPUM_CHANGED_IDTR)
1976 {
1977 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1978 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1979 }
1980
1981 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1982 {
1983 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1984 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1985 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1986 }
1987
1988 if (fFlags & CPUM_CHANGED_LDTR)
1989 {
1990 if (fHiddenSelRegsValid)
1991 {
1992 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1993 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1994 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1995 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1996 }
1997 else
1998 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1999 }
2000
2001 if (fFlags & CPUM_CHANGED_CPUID)
2002 {
2003 uint32_t u32Dummy;
2004
2005 /*
2006 * Get the CPUID features.
2007 */
2008 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2009 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2010 }
2011
2012 /* Sync FPU state after CR4, CPUID and EFER (!). */
2013 if (fFlags & CPUM_CHANGED_FPU_REM)
2014 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2015 }
2016
2017 /*
2018 * Sync TR unconditionally to make life simpler.
2019 */
2020 pVM->rem.s.Env.tr.selector = pCtx->tr;
2021 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2022 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2023 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2024 /* Note! do_interrupt will fault if the busy flag is still set... */
2025 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2026
2027 /*
2028 * Update selector registers.
2029 * This must be done *after* we've synced gdt, ldt and crX registers
 * since we're reading the GDT/LDT in sync_seg. This will happen with
2031 * saved state which takes a quick dip into rawmode for instance.
2032 */
2033 /*
2034 * Stack; Note first check this one as the CPL might have changed. The
2035 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2036 */
2037
2038 if (fHiddenSelRegsValid)
2039 {
2040 /* The hidden selector registers are valid in the CPU context. */
2041 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2042
2043 /* Set current CPL */
2044 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2045
2046 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2047 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2048 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2049 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2050 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2051 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2052 }
2053 else
2054 {
2055 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2056 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2057 {
2058 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2059
2060 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2061 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2062#ifdef VBOX_WITH_STATISTICS
2063 if (pVM->rem.s.Env.segs[R_SS].newselector)
2064 {
2065 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2066 }
2067#endif
2068 }
2069 else
2070 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2071
2072 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2073 {
2074 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2075 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2076#ifdef VBOX_WITH_STATISTICS
2077 if (pVM->rem.s.Env.segs[R_ES].newselector)
2078 {
2079 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2080 }
2081#endif
2082 }
2083 else
2084 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2085
2086 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2087 {
2088 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2089 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2090#ifdef VBOX_WITH_STATISTICS
2091 if (pVM->rem.s.Env.segs[R_CS].newselector)
2092 {
2093 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2094 }
2095#endif
2096 }
2097 else
2098 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2099
2100 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2101 {
2102 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2103 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2104#ifdef VBOX_WITH_STATISTICS
2105 if (pVM->rem.s.Env.segs[R_DS].newselector)
2106 {
2107 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2108 }
2109#endif
2110 }
2111 else
2112 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2113
2114 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2115 * be the same but not the base/limit. */
2116 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2117 {
2118 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2119 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2120#ifdef VBOX_WITH_STATISTICS
2121 if (pVM->rem.s.Env.segs[R_FS].newselector)
2122 {
2123 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2124 }
2125#endif
2126 }
2127 else
2128 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2129
2130 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2131 {
2132 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2133 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2134#ifdef VBOX_WITH_STATISTICS
2135 if (pVM->rem.s.Env.segs[R_GS].newselector)
2136 {
2137 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2138 }
2139#endif
2140 }
2141 else
2142 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2143 }
2144
2145 /*
2146 * Check for traps.
2147 */
2148 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2149 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2150 if (RT_SUCCESS(rc))
2151 {
2152#ifdef DEBUG
2153 if (u8TrapNo == 0x80)
2154 {
2155 remR3DumpLnxSyscall(pVCpu);
2156 remR3DumpOBsdSyscall(pVCpu);
2157 }
2158#endif
2159
2160 pVM->rem.s.Env.exception_index = u8TrapNo;
2161 if (enmType != TRPM_SOFTWARE_INT)
2162 {
2163 pVM->rem.s.Env.exception_is_int = 0;
2164 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2165 }
2166 else
2167 {
2168 /*
 * There are two one-byte opcodes and one two-byte opcode for software interrupts.
 * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
 * for int3 and into.
2172 */
2173 pVM->rem.s.Env.exception_is_int = 1;
2174 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2175 /* int 3 may be generated by one-byte 0xcc */
2176 if (u8TrapNo == 3)
2177 {
2178 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2179 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2180 }
2181 /* int 4 may be generated by one-byte 0xce */
2182 else if (u8TrapNo == 4)
2183 {
2184 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2185 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2186 }
2187 }
2188
2189 /* get error code and cr2 if needed. */
2190 switch (u8TrapNo)
2191 {
2192 case 0x0e:
2193 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2194 /* fallthru */
2195 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2196 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2197 break;
2198
2199 case 0x11: case 0x08:
2200 default:
2201 pVM->rem.s.Env.error_code = 0;
2202 break;
2203 }
2204
2205 /*
2206 * We can now reset the active trap since the recompiler is gonna have a go at it.
2207 */
2208 rc = TRPMResetTrap(pVCpu);
2209 AssertRC(rc);
2210 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2211 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2212 }
2213
2214 /*
2215 * Clear old interrupt request flags; Check for pending hardware interrupts.
2216 * (See @remark for why we don't check for other FFs.)
2217 */
2218 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2219 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2220 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2221 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2222
2223 /*
2224 * We're now in REM mode.
2225 */
2226 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2227 pVM->rem.s.fInREM = true;
2228 pVM->rem.s.fInStateSync = false;
2229 pVM->rem.s.cCanExecuteRaw = 0;
2230 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2231 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2232 return VINF_SUCCESS;
2233}
2234
2235
2236/**
 * Syncs back changes in the REM state to the VM state.
2238 *
2239 * This must be called after invoking REMR3Run().
2240 * Calling it several times in a row is not permitted.
2241 *
2242 * @returns VBox status code.
2243 *
2244 * @param pVM VM Handle.
2245 * @param pVCpu VMCPU Handle.
2246 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    /* Transfer the recompiler's FPU/XMM state back into the guest context. */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* A non-zero newselector means the selector load is still pending (lazy
       segment sync); count how often we sync back in that state. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle changes the virtual-8086 interrupt redirection setup,
       so force a TSS resync before raw-mode execution. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* Note! (flags >> 8) & 0xF0FF extracts the attribute bits from descriptor
       dword 2, dropping the limit 19:16 nibble which lives in bits 8..11 of
       the shifted value. */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr             != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base  != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u   != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                     ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                     : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* REMR3State cleared the busy bit before entry (do_interrupt faults on
           a busy TSS); restore it here since the architecture keeps TR busy. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * If the recompiler left an exception pending, re-assert it with TRPM so
     * the rest of the VMM can deliver it.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        /* Forward the error code / fault address for the exceptions that carry one. */
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2486
2487
2488/**
2489 * This is called by the disassembler when it wants to update the cpu state
2490 * before for instance doing a register dump.
2491 */
2492static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2493{
2494 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2495 unsigned i;
2496
2497 Assert(pVM->rem.s.fInREM);
2498
2499 /*
2500 * Copy back the registers.
2501 * This is done in the order they are declared in the CPUMCTX structure.
2502 */
2503
2504 /** @todo FOP */
2505 /** @todo FPUIP */
2506 /** @todo CS */
2507 /** @todo FPUDP */
2508 /** @todo DS */
2509 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2510 pCtx->fpu.MXCSR = 0;
2511 pCtx->fpu.MXCSR_MASK = 0;
2512
2513 /** @todo check if FPU/XMM was actually used in the recompiler */
2514 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2515//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2516
2517#ifdef TARGET_X86_64
2518 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2519 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2520 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2521 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2522 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2523 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2524 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2525 pCtx->r8 = pVM->rem.s.Env.regs[8];
2526 pCtx->r9 = pVM->rem.s.Env.regs[9];
2527 pCtx->r10 = pVM->rem.s.Env.regs[10];
2528 pCtx->r11 = pVM->rem.s.Env.regs[11];
2529 pCtx->r12 = pVM->rem.s.Env.regs[12];
2530 pCtx->r13 = pVM->rem.s.Env.regs[13];
2531 pCtx->r14 = pVM->rem.s.Env.regs[14];
2532 pCtx->r15 = pVM->rem.s.Env.regs[15];
2533
2534 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2535#else
2536 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2537 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2538 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2539 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2540 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2541 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2542 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2543
2544 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2545#endif
2546
2547 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2548
2549 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2550 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2551 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2552 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2553 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2554
2555#ifdef TARGET_X86_64
2556 pCtx->rip = pVM->rem.s.Env.eip;
2557 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2558#else
2559 pCtx->eip = pVM->rem.s.Env.eip;
2560 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2561#endif
2562
2563 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2564 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2565 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2566 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2567 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2568 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2569
2570 for (i = 0; i < 8; i++)
2571 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2572
2573 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2574 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2575 {
2576 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2577 STAM_COUNTER_INC(&gStatREMGDTChange);
2578 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2579 }
2580
2581 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2582 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2583 {
2584 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2585 STAM_COUNTER_INC(&gStatREMIDTChange);
2586 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2587 }
2588
2589 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2590 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2591 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2592 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2593 {
2594 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2595 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2596 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2597 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2598 STAM_COUNTER_INC(&gStatREMLDTRChange);
2599 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2600 }
2601
2602 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2603 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2604 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2605 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2606 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2607 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2608 : 0) )
2609 {
2610 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2611 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2612 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2613 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2614 pCtx->tr = pVM->rem.s.Env.tr.selector;
2615 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2616 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2617 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2618 if (pCtx->trHid.Attr.u)
2619 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2620 STAM_COUNTER_INC(&gStatREMTRChange);
2621 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2622 }
2623
2624 /** @todo These values could still be out of sync! */
2625 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2626 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2627 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2628 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2629
2630 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2631 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2632 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2633
2634 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2635 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2636 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2637
2638 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2639 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2640 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2641
2642 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2643 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2644 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2645
2646 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2647 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2648 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2649
2650 /* Sysenter MSR */
2651 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2652 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2653 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2654
2655 /* System MSRs. */
2656 pCtx->msrEFER = pVM->rem.s.Env.efer;
2657 pCtx->msrSTAR = pVM->rem.s.Env.star;
2658 pCtx->msrPAT = pVM->rem.s.Env.pat;
2659#ifdef TARGET_X86_64
2660 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2661 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2662 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2663 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2664#endif
2665
2666}
2667
2668
2669/**
2670 * Update the VMM state information if we're currently in REM.
2671 *
2672 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2673 * we're currently executing in REM and the VMM state is invalid. This method will of
2674 * course check that we're executing in REM before syncing any data over to the VMM.
2675 *
2676 * @param pVM The VM handle.
2677 * @param pVCpu The VMCPU handle.
2678 */
2679REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2680{
2681 if (pVM->rem.s.fInREM)
2682 remR3StateUpdate(pVM, pVCpu);
2683}
2684
2685
2686#undef LOG_GROUP
2687#define LOG_GROUP LOG_GROUP_REM
2688
2689
2690/**
2691 * Notify the recompiler about Address Gate 20 state change.
2692 *
2693 * This notification is required since A20 gate changes are
2694 * initialized from a device driver and the VM might just as
2695 * well be in REM mode as in RAW mode.
2696 *
2697 * @param pVM VM handle.
2698 * @param pVCpu VMCPU handle.
2699 * @param fEnable True if the gate should be enabled.
2700 * False if the gate should be disabled.
2701 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Bracket the QEMU call with the ignore-all counter -- presumably so that
       any access/TLB notifications triggered by the A20 change are not fed
       back to VBox (NOTE(review): confirm against the cIgnoreAll users). */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2711
2712
2713/**
2714 * Replays the handler notification changes
2715 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2716 *
2717 * @param pVM VM handle.
2718 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;  /* record count; cross-checked while reversing and draining. */
#endif

        /* Lockless purging of pending notifications: atomically detach the
           whole pending list, leaving an empty (UINT32_MAX) list behind. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         * (Producers push onto the head, so the detached list is LIFO.)
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t                idxCur;
            Assert(--c >= 0);

            /* Dispatch on the notification kind recorded by the producer. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (Lockless LIFO push: read head, link, CAS; retry until it sticks.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
2836
2837
2838/**
2839 * Notify REM about changed code page.
2840 *
2841 * @returns VBox status code.
2842 * @param pVM VM handle.
2843 * @param pVCpu VMCPU handle.
2844 * @param pvCodePage Code page address
2845 */
2846REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
2847{
2848#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2849 int rc;
2850 RTGCPHYS PhysGC;
2851 uint64_t flags;
2852
2853 VM_ASSERT_EMT(pVM);
2854
2855 /*
2856 * Get the physical page address.
2857 */
2858 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2859 if (rc == VINF_SUCCESS)
2860 {
2861 /*
2862 * Sync the required registers and flush the whole page.
2863 * (Easier to do the whole page than notifying it about each physical
2864 * byte that was changed.
2865 */
2866 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2867 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2868 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2869 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2870
2871 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2872 }
2873#endif
2874 return VINF_SUCCESS;
2875}
2876
2877
2878/**
2879 * Notification about a successful MMR3PhysRegister() call.
2880 *
2881 * @param pVM VM handle.
2882 * @param GCPhys The physical address the RAM.
2883 * @param cb Size of the memory.
2884 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2885 */
2886REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2887{
2888 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2889 VM_ASSERT_EMT(pVM);
2890
2891 /*
2892 * Validate input - we trust the caller.
2893 */
2894 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2895 Assert(cb);
2896 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2897 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2898
2899 /*
2900 * Base ram? Update GCPhysLastRam.
2901 */
2902 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2903 {
2904 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2905 {
2906 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2907 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2908 }
2909 }
2910
2911 /*
2912 * Register the ram.
2913 */
2914 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2915
2916 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2917 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2918 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2919
2920 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2921}
2922
2923
/**
 * Notification about a successful MMR3PhysRomRegister() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address of the ROM.
 * @param   cb          The size of the ROM.
 * @param   pvCopy      Pointer to the ROM copy.  (Unused by this function.)
 * @param   fShadow     Whether it's currently writable shadow ROM or normal readonly ROM.
 *                      This function will be called when ever the protection of the
 *                      shadow ROM changes (at reset and end of POST).
 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Register the rom.  Writable shadow ROM is registered as plain RAM
     * (no IO_MEM_ROM flag); protected ROM gets the read-only memory type.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2958
2959
/**
 * Notification about a successful memory deregistration or reservation.
 *
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address.
 * @param   cb          The size of the range.
 */
REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory - the range becomes IO_MEM_UNASSIGNED in the
     * recompiler's physical memory map.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2990
2991
2992/**
2993 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2994 *
2995 * @param pVM VM Handle.
2996 * @param enmType Handler type.
2997 * @param GCPhys Handler range address.
2998 * @param cb Size of the handler range.
2999 * @param fHasHCHandler Set if the handler has a HC callback function.
3000 *
3001 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3002 * Handler memory type to memory which has no HC handler.
3003 */
3004static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3005{
3006 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3007 enmType, GCPhys, cb, fHasHCHandler));
3008
3009 VM_ASSERT_EMT(pVM);
3010 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3011 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3012
3013
3014 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3015
3016 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3017 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3018 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3019 else if (fHasHCHandler)
3020 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3021 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3022
3023 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3024}
3025
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 *
 * @remark  MMR3PhysRomRegister assumes that this function will not apply the
 *          Handler memory type to memory which has no HC handler.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    /* Flush any queued notifications first so they are applied in order. */
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
}
3044
3045/**
3046 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3047 *
3048 * @param pVM VM Handle.
3049 * @param enmType Handler type.
3050 * @param GCPhys Handler range address.
3051 * @param cb Size of the handler range.
3052 * @param fHasHCHandler Set if the handler has a HC callback function.
3053 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3054 */
3055static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3056{
3057 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3058 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3059 VM_ASSERT_EMT(pVM);
3060
3061
3062 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3063
3064 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3065 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3066 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3067 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3068 else if (fHasHCHandler)
3069 {
3070 if (!fRestoreAsRAM)
3071 {
3072 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3073 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3074 }
3075 else
3076 {
3077 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3078 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3079 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3080 }
3081 }
3082 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3083
3084 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3085}
3086
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    /* Flush any queued notifications first so they are applied in order. */
    REMR3ReplayHandlerNotifications(pVM);
    remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
}
3102
3103
3104/**
3105 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3106 *
3107 * @param pVM VM Handle.
3108 * @param enmType Handler type.
3109 * @param GCPhysOld Old handler range address.
3110 * @param GCPhysNew New handler range address.
3111 * @param cb Size of the handler range.
3112 * @param fHasHCHandler Set if the handler has a HC callback function.
3113 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3114 */
3115static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3116{
3117 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3118 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3119 VM_ASSERT_EMT(pVM);
3120 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3121
3122 if (fHasHCHandler)
3123 {
3124 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3125
3126 /*
3127 * Reset the old page.
3128 */
3129 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3130 if (!fRestoreAsRAM)
3131 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3132 else
3133 {
3134 /* This is not perfect, but it'll do for PD monitoring... */
3135 Assert(cb == PAGE_SIZE);
3136 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3137 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3138 }
3139
3140 /*
3141 * Update the new page.
3142 */
3143 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3144 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3145 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3146 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3147
3148 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3149 }
3150}
3151
/**
 * Notification about a successful PGMR3HandlerPhysicalModify() call.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhysOld       Old handler range address.
 * @param   GCPhysNew       New handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    /* Flush any queued notifications first so they are applied in order. */
    REMR3ReplayHandlerNotifications(pVM);

    remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
}
3169
3170/**
3171 * Checks if we're handling access to this page or not.
3172 *
3173 * @returns true if we're trapping access.
3174 * @returns false if we aren't.
3175 * @param pVM The VM handle.
3176 * @param GCPhys The physical address.
3177 *
3178 * @remark This function will only work correctly in VBOX_STRICT builds!
3179 */
3180REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3181{
3182#ifdef VBOX_STRICT
3183 unsigned long off;
3184 REMR3ReplayHandlerNotifications(pVM);
3185
3186 off = get_phys_page_offset(GCPhys);
3187 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3188 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3189 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3190#else
3191 return false;
3192#endif
3193}
3194
3195
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error.
 *
 * @returns The physical address corresponding to addr.
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The I/O TLB entry; memory type in the sub-page bits,
 *                      addend in the page-aligned part.
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
                                             target_ulong addr,
                                             CPUTLBEntry* pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored code page: dump handler/MMIO/phys diagnostics to the
       release log and abort - executing from this memory type is fatal. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3234
/**
 * Read guest RAM and ROM.
 *
 * @param   SrcGCPhys       The source address (guest physical).
 * @param   pvDst           The destination address.
 * @param   cb              Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3252
3253
3254/**
3255 * Read guest RAM and ROM, unsigned 8-bit.
3256 *
3257 * @param SrcGCPhys The source address (guest physical).
3258 */
3259RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3260{
3261 uint8_t val;
3262 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3263 VBOX_CHECK_ADDR(SrcGCPhys);
3264 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3265 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3266#ifdef VBOX_DEBUG_PHYS
3267 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3268#endif
3269 return val;
3270}
3271
3272
3273/**
3274 * Read guest RAM and ROM, signed 8-bit.
3275 *
3276 * @param SrcGCPhys The source address (guest physical).
3277 */
3278RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3279{
3280 int8_t val;
3281 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3282 VBOX_CHECK_ADDR(SrcGCPhys);
3283 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3284 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3285#ifdef VBOX_DEBUG_PHYS
3286 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3287#endif
3288 return val;
3289}
3290
3291
3292/**
3293 * Read guest RAM and ROM, unsigned 16-bit.
3294 *
3295 * @param SrcGCPhys The source address (guest physical).
3296 */
3297RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3298{
3299 uint16_t val;
3300 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3301 VBOX_CHECK_ADDR(SrcGCPhys);
3302 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3303 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3304#ifdef VBOX_DEBUG_PHYS
3305 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3306#endif
3307 return val;
3308}
3309
3310
3311/**
3312 * Read guest RAM and ROM, signed 16-bit.
3313 *
3314 * @param SrcGCPhys The source address (guest physical).
3315 */
3316RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3317{
3318 int16_t val;
3319 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3320 VBOX_CHECK_ADDR(SrcGCPhys);
3321 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3322 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3323#ifdef VBOX_DEBUG_PHYS
3324 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3325#endif
3326 return val;
3327}
3328
3329
3330/**
3331 * Read guest RAM and ROM, unsigned 32-bit.
3332 *
3333 * @param SrcGCPhys The source address (guest physical).
3334 */
3335RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3336{
3337 uint32_t val;
3338 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3339 VBOX_CHECK_ADDR(SrcGCPhys);
3340 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3341 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3342#ifdef VBOX_DEBUG_PHYS
3343 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3344#endif
3345 return val;
3346}
3347
3348
3349/**
3350 * Read guest RAM and ROM, signed 32-bit.
3351 *
3352 * @param SrcGCPhys The source address (guest physical).
3353 */
3354RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3355{
3356 int32_t val;
3357 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3358 VBOX_CHECK_ADDR(SrcGCPhys);
3359 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3360 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3361#ifdef VBOX_DEBUG_PHYS
3362 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3363#endif
3364 return val;
3365}
3366
3367
3368/**
3369 * Read guest RAM and ROM, unsigned 64-bit.
3370 *
3371 * @param SrcGCPhys The source address (guest physical).
3372 */
3373uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3374{
3375 uint64_t val;
3376 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3377 VBOX_CHECK_ADDR(SrcGCPhys);
3378 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3379 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3380#ifdef VBOX_DEBUG_PHYS
3381 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3382#endif
3383 return val;
3384}
3385
3386
3387/**
3388 * Read guest RAM and ROM, signed 64-bit.
3389 *
3390 * @param SrcGCPhys The source address (guest physical).
3391 */
3392int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3393{
3394 int64_t val;
3395 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3396 VBOX_CHECK_ADDR(SrcGCPhys);
3397 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3398 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3399#ifdef VBOX_DEBUG_PHYS
3400 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3401#endif
3402 return val;
3403}
3404
3405
/**
 * Write guest RAM.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   pvSrc           The source address.
 * @param   cb              Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3423
3424
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3441
3442
/**
 * Write guest RAM, unsigned 16-bit.  (Header previously said 8-bit.)
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3459
3460
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3477
3478
3479/**
3480 * Write guest RAM, unsigned 64-bit.
3481 *
3482 * @param DstGCPhys The destination address (guest physical).
3483 * @param val Value
3484 */
3485void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3486{
3487 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3488 VBOX_CHECK_ADDR(DstGCPhys);
3489 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3490 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3491#ifdef VBOX_DEBUG_PHYS
3492 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3493#endif
3494}
3495
3496#undef LOG_GROUP
3497#define LOG_GROUP LOG_GROUP_REM_MMIO
3498
3499/** Read MMIO memory. */
3500static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3501{
3502 uint32_t u32 = 0;
3503 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3504 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3505 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3506 return u32;
3507}
3508
3509/** Read MMIO memory. */
3510static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3511{
3512 uint32_t u32 = 0;
3513 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3514 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3515 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3516 return u32;
3517}
3518
3519/** Read MMIO memory. */
3520static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3521{
3522 uint32_t u32 = 0;
3523 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3524 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3525 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3526 return u32;
3527}
3528
/** Write to MMIO memory, 8-bit access via IOM.  Logged before the write so a
 *  failing access is still visible in the log. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3537
/** Write to MMIO memory, 16-bit access via IOM.  Logged before the write so a
 *  failing access is still visible in the log. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3546
/** Write to MMIO memory, 32-bit access via IOM.  Logged before the write so a
 *  failing access is still visible in the log. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3555
3556
3557#undef LOG_GROUP
3558#define LOG_GROUP LOG_GROUP_REM_HANDLER
3559
3560/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3561
3562static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3563{
3564 uint8_t u8;
3565 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3566 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3567 return u8;
3568}
3569
3570static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3571{
3572 uint16_t u16;
3573 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3574 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3575 return u16;
3576}
3577
3578static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3579{
3580 uint32_t u32;
3581 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3582 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3583 return u32;
3584}
3585
/** Handler-memory write callback, 8-bit.
 *  NOTE(review): writes the low byte of u32 by passing &u32 with a 1-byte
 *  size - assumes a little-endian host; confirm if ported. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3591
/** Handler-memory write callback, 16-bit.
 *  NOTE(review): writes the low word of u32 by passing &u32 with a 2-byte
 *  size - assumes a little-endian host; confirm if ported. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3597
/** Handler-memory write callback, 32-bit: passes the value through to PGM. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3603
3604/* -+- disassembly -+- */
3605
3606#undef LOG_GROUP
3607#define LOG_GROUP LOG_GROUP_REM_DISAS
3608
3609
/**
 * Enables or disables singled stepped disassembly.
 *
 * Worker for REMR3DisasEnableStepping; must run on the EMT.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   fEnable     To enable set this flag, to disable clear it.
 */
static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Toggle the CPU_EMULATE_SINGLE_STEP bit in the recompiler CPU state. */
    if (fEnable)
        pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
    else
        pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
    return VINF_SUCCESS;
}
3628
3629
/**
 * Enables or disables singled stepped disassembly.
 *
 * Calls the worker directly when already on the EMT, otherwise marshals
 * the request to an EMT and waits for completion.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   fEnable     To enable set this flag, to disable clear it.
 */
REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
{
    int rc;

    LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
    if (VM_IS_EMT(pVM))
        return remR3DisasEnableStepping(pVM, fEnable);

    rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
    AssertRC(rc);
    return rc;
}
3649
3650
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument the current mode is printed; with one boolean argument
 * single-stepped disassembly is switched on or off accordingly.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3675
3676
3677/**
3678 * Disassembles one instruction and prints it to the log.
3679 *
3680 * @returns Success indicator.
3681 * @param env Pointer to the recompiler CPU structure.
3682 * @param f32BitCode Indicates that whether or not the code should
3683 * be disassembled as 16 or 32 bit. If -1 the CS
3684 * selector will be inspected.
3685 * @param pszPrefix
3686 */
3687bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3688{
3689 PVM pVM = env->pVM;
3690 const bool fLog = LogIsEnabled();
3691 const bool fLog2 = LogIs2Enabled();
3692 int rc = VINF_SUCCESS;
3693
3694 /*
3695 * Don't bother if there ain't any log output to do.
3696 */
3697 if (!fLog && !fLog2)
3698 return true;
3699
3700 /*
3701 * Update the state so DBGF reads the correct register values.
3702 */
3703 remR3StateUpdate(pVM, env->pVCpu);
3704
3705 /*
3706 * Log registers if requested.
3707 */
3708 if (!fLog2)
3709 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3710
3711 /*
3712 * Disassemble to log.
3713 */
3714 if (fLog)
3715 rc = DBGFR3DisasInstrCurrentLogInternal(env->pVCpu, pszPrefix);
3716
3717 return RT_SUCCESS(rc);
3718}
3719
3720
/**
 * Disassemble recompiled code.
 *
 * @param   phFile  Ignored unless DEBUG_TMP_LOGGING is defined; logfile usually.
 * @param   pvCode  Pointer to the code block.
 * @param   cb      Size of the code block.
 */
void disas(FILE *phFile, void *pvCode, unsigned long cb)
{
#ifdef DEBUG_TMP_LOGGING
# define DISAS_PRINTF(x...) fprintf(phFile, x)
#else
# define DISAS_PRINTF(x...) RTLogPrintf(x)
    /* Note: this if() guards the whole block below in the non-tmp-logging build. */
    if (LogIs2Enabled())
#endif
    {
        unsigned off = 0;
        char szOutput[256];
        DISCPUSTATE Cpu;

        memset(&Cpu, 0, sizeof(Cpu));
#ifdef RT_ARCH_X86
        Cpu.mode = CPUMODE_32BIT;
#else
        Cpu.mode = CPUMODE_64BIT;
#endif

        DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
        while (off < cb)
        {
            uint32_t cbInstr;
            if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
                DISAS_PRINTF("%s", szOutput);
            else
            {
                DISAS_PRINTF("disas error\n");
                cbInstr = 1;
#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
                break;
#endif
            }
            off += cbInstr;
        }
    }

#undef DISAS_PRINTF
}
3768
3769
3770/**
3771 * Disassemble guest code.
3772 *
3773 * @param phFileIgnored Ignored, logfile usually.
3774 * @param uCode The guest address of the code to disassemble. (flat?)
3775 * @param cb Number of bytes to disassemble.
3776 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3777 */
3778void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3779{
3780#ifdef DEBUG_TMP_LOGGING
3781# define DISAS_PRINTF(x...) fprintf(phFile, x)
3782#else
3783# define DISAS_PRINTF(x...) RTLogPrintf(x)
3784 if (LogIs2Enabled())
3785#endif
3786 {
3787 PVM pVM = cpu_single_env->pVM;
3788 PVMCPU pVCpu = cpu_single_env->pVCpu;
3789 RTSEL cs;
3790 RTGCUINTPTR eip;
3791
3792 Assert(pVCpu);
3793
3794 /*
3795 * Update the state so DBGF reads the correct register values (flags).
3796 */
3797 remR3StateUpdate(pVM, pVCpu);
3798
3799 /*
3800 * Do the disassembling.
3801 */
3802 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3803 cs = cpu_single_env->segs[R_CS].selector;
3804 eip = uCode - cpu_single_env->segs[R_CS].base;
3805 for (;;)
3806 {
3807 char szBuf[256];
3808 uint32_t cbInstr;
3809 int rc = DBGFR3DisasInstrEx(pVM,
3810 pVCpu->idCpu,
3811 cs,
3812 eip,
3813 0,
3814 szBuf, sizeof(szBuf),
3815 &cbInstr);
3816 if (RT_SUCCESS(rc))
3817 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3818 else
3819 {
3820 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3821 cbInstr = 1;
3822 }
3823
3824 /* next */
3825 if (cb <= cbInstr)
3826 break;
3827 cb -= cbInstr;
3828 uCode += cbInstr;
3829 eip += cbInstr;
3830 }
3831 }
3832#undef DISAS_PRINTF
3833}
3834
3835
3836/**
3837 * Looks up a guest symbol.
3838 *
3839 * @returns Pointer to symbol name. This is a static buffer.
3840 * @param orig_addr The address in question.
3841 */
3842const char *lookup_symbol(target_ulong orig_addr)
3843{
3844 PVM pVM = cpu_single_env->pVM;
3845 RTGCINTPTR off = 0;
3846 RTDBGSYMBOL Sym;
3847 DBGFADDRESS Addr;
3848
3849 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
3850 if (RT_SUCCESS(rc))
3851 {
3852 static char szSym[sizeof(Sym.szName) + 48];
3853 if (!off)
3854 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3855 else if (off > 0)
3856 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3857 else
3858 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3859 return szSym;
3860 }
3861 return "<N/A>";
3862}
3863
3864
3865#undef LOG_GROUP
3866#define LOG_GROUP LOG_GROUP_REM
3867
3868
3869/* -+- FF notifications -+- */
3870
3871
3872/**
3873 * Notification about a pending interrupt.
3874 *
3875 * @param pVM VM Handle.
3876 * @param pVCpu VMCPU Handle.
3877 * @param u8Interrupt Interrupt
3878 * @thread The emulation thread.
3879 */
3880REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
3881{
3882 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3883 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3884}
3885
3886/**
3887 * Notification about a pending interrupt.
3888 *
3889 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3890 * @param pVM VM Handle.
3891 * @param pVCpu VMCPU Handle.
3892 * @thread The emulation thread.
3893 */
3894REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
3895{
3896 return pVM->rem.s.u32PendingInterrupt;
3897}
3898
3899/**
3900 * Notification about the interrupt FF being set.
3901 *
3902 * @param pVM VM Handle.
3903 * @param pVCpu VMCPU Handle.
3904 * @thread The emulation thread.
3905 */
3906REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3907{
3908 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3909 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3910 if (pVM->rem.s.fInREM)
3911 {
3912 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3913 CPU_INTERRUPT_EXTERNAL_HARD);
3914 }
3915}
3916
3917
3918/**
3919 * Notification about the interrupt FF being set.
3920 *
3921 * @param pVM VM Handle.
3922 * @param pVCpu VMCPU Handle.
3923 * @thread Any.
3924 */
3925REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3926{
3927 LogFlow(("REMR3NotifyInterruptClear:\n"));
3928 if (pVM->rem.s.fInREM)
3929 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3930}
3931
3932
3933/**
3934 * Notification about pending timer(s).
3935 *
3936 * @param pVM VM Handle.
3937 * @param pVCpuDst The target cpu for this notification.
3938 * TM will not broadcast pending timer events, but use
3939 * a decidated EMT for them. So, only interrupt REM
3940 * execution if the given CPU is executing in REM.
3941 * @thread Any.
3942 */
3943REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3944{
3945#ifndef DEBUG_bird
3946 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3947#endif
3948 if (pVM->rem.s.fInREM)
3949 {
3950 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3951 {
3952 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3953 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3954 CPU_INTERRUPT_EXTERNAL_TIMER);
3955 }
3956 else
3957 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
3958 }
3959 else
3960 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
3961}
3962
3963
3964/**
3965 * Notification about pending DMA transfers.
3966 *
3967 * @param pVM VM Handle.
3968 * @thread Any.
3969 */
3970REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3971{
3972 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3973 if (pVM->rem.s.fInREM)
3974 {
3975 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3976 CPU_INTERRUPT_EXTERNAL_DMA);
3977 }
3978}
3979
3980
3981/**
3982 * Notification about pending timer(s).
3983 *
3984 * @param pVM VM Handle.
3985 * @thread Any.
3986 */
3987REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3988{
3989 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3990 if (pVM->rem.s.fInREM)
3991 {
3992 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3993 CPU_INTERRUPT_EXTERNAL_EXIT);
3994 }
3995}
3996
3997
3998/**
3999 * Notification about pending FF set by an external thread.
4000 *
4001 * @param pVM VM handle.
4002 * @thread Any.
4003 */
4004REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4005{
4006 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4007 if (pVM->rem.s.fInREM)
4008 {
4009 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4010 CPU_INTERRUPT_EXTERNAL_EXIT);
4011 }
4012}
4013
4014
4015#ifdef VBOX_WITH_STATISTICS
4016void remR3ProfileStart(int statcode)
4017{
4018 STAMPROFILEADV *pStat;
4019 switch(statcode)
4020 {
4021 case STATS_EMULATE_SINGLE_INSTR:
4022 pStat = &gStatExecuteSingleInstr;
4023 break;
4024 case STATS_QEMU_COMPILATION:
4025 pStat = &gStatCompilationQEmu;
4026 break;
4027 case STATS_QEMU_RUN_EMULATED_CODE:
4028 pStat = &gStatRunCodeQEmu;
4029 break;
4030 case STATS_QEMU_TOTAL:
4031 pStat = &gStatTotalTimeQEmu;
4032 break;
4033 case STATS_QEMU_RUN_TIMERS:
4034 pStat = &gStatTimers;
4035 break;
4036 case STATS_TLB_LOOKUP:
4037 pStat= &gStatTBLookup;
4038 break;
4039 case STATS_IRQ_HANDLING:
4040 pStat= &gStatIRQ;
4041 break;
4042 case STATS_RAW_CHECK:
4043 pStat = &gStatRawCheck;
4044 break;
4045
4046 default:
4047 AssertMsgFailed(("unknown stat %d\n", statcode));
4048 return;
4049 }
4050 STAM_PROFILE_ADV_START(pStat, a);
4051}
4052
4053
4054void remR3ProfileStop(int statcode)
4055{
4056 STAMPROFILEADV *pStat;
4057 switch(statcode)
4058 {
4059 case STATS_EMULATE_SINGLE_INSTR:
4060 pStat = &gStatExecuteSingleInstr;
4061 break;
4062 case STATS_QEMU_COMPILATION:
4063 pStat = &gStatCompilationQEmu;
4064 break;
4065 case STATS_QEMU_RUN_EMULATED_CODE:
4066 pStat = &gStatRunCodeQEmu;
4067 break;
4068 case STATS_QEMU_TOTAL:
4069 pStat = &gStatTotalTimeQEmu;
4070 break;
4071 case STATS_QEMU_RUN_TIMERS:
4072 pStat = &gStatTimers;
4073 break;
4074 case STATS_TLB_LOOKUP:
4075 pStat= &gStatTBLookup;
4076 break;
4077 case STATS_IRQ_HANDLING:
4078 pStat= &gStatIRQ;
4079 break;
4080 case STATS_RAW_CHECK:
4081 pStat = &gStatRawCheck;
4082 break;
4083 default:
4084 AssertMsgFailed(("unknown stat %d\n", statcode));
4085 return;
4086 }
4087 STAM_PROFILE_ADV_STOP(pStat, a);
4088}
4089#endif
4090
4091/**
4092 * Raise an RC, force rem exit.
4093 *
4094 * @param pVM VM handle.
4095 * @param rc The rc.
4096 */
4097void remR3RaiseRC(PVM pVM, int rc)
4098{
4099 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4100 Assert(pVM->rem.s.fInREM);
4101 VM_ASSERT_EMT(pVM);
4102 pVM->rem.s.rc = rc;
4103 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4104}
4105
4106
4107/* -+- timers -+- */
4108
/** Reads the virtual CPU's TSC via TM (also bumps the profiling counter). */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4114
4115
4116/* -+- interrupts -+- */
4117
/** Asserts the FPU error line (ISA IRQ 13) on the virtual PIC. */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4123
/**
 * Gets the next pending interrupt vector for the recompiled CPU.
 *
 * Prefers an interrupt stashed by REMR3NotifyPendingInterrupt over asking PDM.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector stashed by REMR3NotifyPendingInterrupt. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts may be pending; keep the hard interrupt request up. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4156
4157
4158/* -+- local apic -+- */
4159
/** Sets the APIC base MSR via PDM; failures are only logged. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4165
4166uint64_t cpu_get_apic_base(CPUX86State *env)
4167{
4168 uint64_t u64;
4169 int rc = PDMApicGetBase(env->pVM, &u64);
4170 if (RT_SUCCESS(rc))
4171 {
4172 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4173 return u64;
4174 }
4175 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4176 return 0;
4177}
4178
/** Sets the task priority (CR8) via PDM; failures are only logged. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4184
4185uint8_t cpu_get_apic_tpr(CPUX86State *env)
4186{
4187 uint8_t u8;
4188 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4189 if (RT_SUCCESS(rc))
4190 {
4191 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4192 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4193 }
4194 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4195 return 0;
4196}
4197
4198
4199uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4200{
4201 uint64_t value;
4202 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4203 if (RT_SUCCESS(rc))
4204 {
4205 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4206 return value;
4207 }
4208 /** @todo: exception ? */
4209 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4210 return value;
4211}
4212
/** Writes an APIC MSR via PDM (always CPU 0); failures are only logged. */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4219
/** Reads a guest MSR via CPUM. */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    Assert(env->pVCpu);
    return CPUMGetGuestMsr(env->pVCpu, msr);
}
4225
/** Writes a guest MSR via CPUM. */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    Assert(env->pVCpu);
    CPUMSetGuestMsr(env->pVCpu, msr, val);
}
4231
4232/* -+- I/O Ports -+- */
4233
4234#undef LOG_GROUP
4235#define LOG_GROUP LOG_GROUP_REM_IOPORT
4236
/**
 * Byte port-out for the recompiler: forwards to IOM, escalates EM scheduling
 * statuses via remR3RaiseRC, and aborts on any other failure.
 */
void cpu_outb(CPUState *env, int addr, int val)
{
    int rc;

    /* Skip logging the noisiest ports (POST 0x80, CMOS index 0x70, port 0x61). */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        /* EM scheduling status: stash it and force the recompiler to exit. */
        Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4255
4256void cpu_outw(CPUState *env, int addr, int val)
4257{
4258 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4259 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4260 if (RT_LIKELY(rc == VINF_SUCCESS))
4261 return;
4262 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4263 {
4264 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4265 remR3RaiseRC(env->pVM, rc);
4266 return;
4267 }
4268 remAbort(rc, __FUNCTION__);
4269}
4270
4271void cpu_outl(CPUState *env, int addr, int val)
4272{
4273 int rc;
4274 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4275 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4276 if (RT_LIKELY(rc == VINF_SUCCESS))
4277 return;
4278 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4279 {
4280 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4281 remR3RaiseRC(env->pVM, rc);
4282 return;
4283 }
4284 remAbort(rc, __FUNCTION__);
4285}
4286
/**
 * Byte port-in for the recompiler: forwards to IOM, escalates EM scheduling
 * statuses via remR3RaiseRC.
 * @returns The byte read, or 0xff after a fatal (aborted) failure.
 */
int cpu_inb(CPUState *env, int addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        /* CMOS data port 0x71 is too chatty to log. */
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (int)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (int)u32;
    }
    remAbort(rc, __FUNCTION__);
    return 0xff;
}
4306
4307int cpu_inw(CPUState *env, int addr)
4308{
4309 uint32_t u32 = 0;
4310 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4311 if (RT_LIKELY(rc == VINF_SUCCESS))
4312 {
4313 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4314 return (int)u32;
4315 }
4316 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4317 {
4318 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4319 remR3RaiseRC(env->pVM, rc);
4320 return (int)u32;
4321 }
4322 remAbort(rc, __FUNCTION__);
4323 return 0xffff;
4324}
4325
/**
 * Dword port-in for the recompiler: forwards to IOM, escalates EM scheduling
 * statuses via remR3RaiseRC.
 * @returns The dword read, or 0xffffffff after a fatal (aborted) failure.
 */
int cpu_inl(CPUState *env, int addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
/* Historical debugging hook left by the original author: */
//if (addr==0x01f0 && u32 == 0x6b6d)
//    loglevel = ~0;
        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
        return (int)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (int)u32;
    }
    remAbort(rc, __FUNCTION__);
    return 0xffffffff;
}
4346
4347#undef LOG_GROUP
4348#define LOG_GROUP LOG_GROUP_REM
4349
4350
4351/* -+- helpers and misc other interfaces -+- */
4352
4353/**
4354 * Perform the CPUID instruction.
4355 *
4356 * ASMCpuId cannot be invoked from some source files where this is used because of global
4357 * register allocations.
4358 *
4359 * @param env Pointer to the recompiler CPU structure.
4360 * @param uOperator CPUID operation (eax).
4361 * @param pvEAX Where to store eax.
4362 * @param pvEBX Where to store ebx.
4363 * @param pvECX Where to store ecx.
4364 * @param pvEDX Where to store edx.
4365 */
4366void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4367{
4368 CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4369}
4370
4371
4372#if 0 /* not used */
4373/**
4374 * Interface for qemu hardware to report back fatal errors.
4375 */
4376void hw_error(const char *pszFormat, ...)
4377{
4378 /*
4379 * Bitch about it.
4380 */
4381 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4382 * this in my Odin32 tree at home! */
4383 va_list args;
4384 va_start(args, pszFormat);
4385 RTLogPrintf("fatal error in virtual hardware:");
4386 RTLogPrintfV(pszFormat, args);
4387 va_end(args);
4388 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4389
4390 /*
4391 * If we're in REM context we'll sync back the state before 'jumping' to
4392 * the EMs failure handling.
4393 */
4394 PVM pVM = cpu_single_env->pVM;
4395 if (pVM->rem.s.fInREM)
4396 REMR3StateBack(pVM);
4397 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4398 AssertMsgFailed(("EMR3FatalError returned!\n"));
4399}
4400#endif
4401
4402/**
4403 * Interface for the qemu cpu to report unhandled situation
4404 * raising a fatal VM error.
4405 */
4406void cpu_abort(CPUState *env, const char *pszFormat, ...)
4407{
4408 va_list va;
4409 PVM pVM;
4410 PVMCPU pVCpu;
4411 char szMsg[256];
4412
4413 /*
4414 * Bitch about it.
4415 */
4416 RTLogFlags(NULL, "nodisabled nobuffered");
4417 RTLogFlush(NULL);
4418
4419 va_start(va, pszFormat);
4420#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4421 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4422 unsigned cArgs = 0;
4423 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4424 const char *psz = strchr(pszFormat, '%');
4425 while (psz && cArgs < 6)
4426 {
4427 auArgs[cArgs++] = va_arg(va, uintptr_t);
4428 psz = strchr(psz + 1, '%');
4429 }
4430 switch (cArgs)
4431 {
4432 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4433 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4434 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4435 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4436 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4437 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4438 default:
4439 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4440 }
4441#else
4442 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4443#endif
4444 va_end(va);
4445
4446 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4447 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4448
4449 /*
4450 * If we're in REM context we'll sync back the state before 'jumping' to
4451 * the EMs failure handling.
4452 */
4453 pVM = cpu_single_env->pVM;
4454 pVCpu = cpu_single_env->pVCpu;
4455 Assert(pVCpu);
4456
4457 if (pVM->rem.s.fInREM)
4458 REMR3StateBack(pVM, pVCpu);
4459 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4460 AssertMsgFailed(("EMR3FatalError returned!\n"));
4461}
4462
4463
4464/**
4465 * Aborts the VM.
4466 *
4467 * @param rc VBox error code.
4468 * @param pszTip Hint about why/when this happend.
4469 */
4470void remAbort(int rc, const char *pszTip)
4471{
4472 PVM pVM;
4473 PVMCPU pVCpu;
4474
4475 /*
4476 * Bitch about it.
4477 */
4478 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4479 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4480
4481 /*
4482 * Jump back to where we entered the recompiler.
4483 */
4484 pVM = cpu_single_env->pVM;
4485 pVCpu = cpu_single_env->pVCpu;
4486 Assert(pVCpu);
4487
4488 if (pVM->rem.s.fInREM)
4489 REMR3StateBack(pVM, pVCpu);
4490
4491 EMR3FatalError(pVCpu, rc);
4492 AssertMsgFailed(("EMR3FatalError returned!\n"));
4493}
4494
4495
4496/**
4497 * Dumps a linux system call.
4498 * @param pVCpu VMCPU handle.
4499 */
4500void remR3DumpLnxSyscall(PVMCPU pVCpu)
4501{
4502 static const char *apsz[] =
4503 {
4504 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4505 "sys_exit",
4506 "sys_fork",
4507 "sys_read",
4508 "sys_write",
4509 "sys_open", /* 5 */
4510 "sys_close",
4511 "sys_waitpid",
4512 "sys_creat",
4513 "sys_link",
4514 "sys_unlink", /* 10 */
4515 "sys_execve",
4516 "sys_chdir",
4517 "sys_time",
4518 "sys_mknod",
4519 "sys_chmod", /* 15 */
4520 "sys_lchown16",
4521 "sys_ni_syscall", /* old break syscall holder */
4522 "sys_stat",
4523 "sys_lseek",
4524 "sys_getpid", /* 20 */
4525 "sys_mount",
4526 "sys_oldumount",
4527 "sys_setuid16",
4528 "sys_getuid16",
4529 "sys_stime", /* 25 */
4530 "sys_ptrace",
4531 "sys_alarm",
4532 "sys_fstat",
4533 "sys_pause",
4534 "sys_utime", /* 30 */
4535 "sys_ni_syscall", /* old stty syscall holder */
4536 "sys_ni_syscall", /* old gtty syscall holder */
4537 "sys_access",
4538 "sys_nice",
4539 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4540 "sys_sync",
4541 "sys_kill",
4542 "sys_rename",
4543 "sys_mkdir",
4544 "sys_rmdir", /* 40 */
4545 "sys_dup",
4546 "sys_pipe",
4547 "sys_times",
4548 "sys_ni_syscall", /* old prof syscall holder */
4549 "sys_brk", /* 45 */
4550 "sys_setgid16",
4551 "sys_getgid16",
4552 "sys_signal",
4553 "sys_geteuid16",
4554 "sys_getegid16", /* 50 */
4555 "sys_acct",
4556 "sys_umount", /* recycled never used phys() */
4557 "sys_ni_syscall", /* old lock syscall holder */
4558 "sys_ioctl",
4559 "sys_fcntl", /* 55 */
4560 "sys_ni_syscall", /* old mpx syscall holder */
4561 "sys_setpgid",
4562 "sys_ni_syscall", /* old ulimit syscall holder */
4563 "sys_olduname",
4564 "sys_umask", /* 60 */
4565 "sys_chroot",
4566 "sys_ustat",
4567 "sys_dup2",
4568 "sys_getppid",
4569 "sys_getpgrp", /* 65 */
4570 "sys_setsid",
4571 "sys_sigaction",
4572 "sys_sgetmask",
4573 "sys_ssetmask",
4574 "sys_setreuid16", /* 70 */
4575 "sys_setregid16",
4576 "sys_sigsuspend",
4577 "sys_sigpending",
4578 "sys_sethostname",
4579 "sys_setrlimit", /* 75 */
4580 "sys_old_getrlimit",
4581 "sys_getrusage",
4582 "sys_gettimeofday",
4583 "sys_settimeofday",
4584 "sys_getgroups16", /* 80 */
4585 "sys_setgroups16",
4586 "old_select",
4587 "sys_symlink",
4588 "sys_lstat",
4589 "sys_readlink", /* 85 */
4590 "sys_uselib",
4591 "sys_swapon",
4592 "sys_reboot",
4593 "old_readdir",
4594 "old_mmap", /* 90 */
4595 "sys_munmap",
4596 "sys_truncate",
4597 "sys_ftruncate",
4598 "sys_fchmod",
4599 "sys_fchown16", /* 95 */
4600 "sys_getpriority",
4601 "sys_setpriority",
4602 "sys_ni_syscall", /* old profil syscall holder */
4603 "sys_statfs",
4604 "sys_fstatfs", /* 100 */
4605 "sys_ioperm",
4606 "sys_socketcall",
4607 "sys_syslog",
4608 "sys_setitimer",
4609 "sys_getitimer", /* 105 */
4610 "sys_newstat",
4611 "sys_newlstat",
4612 "sys_newfstat",
4613 "sys_uname",
4614 "sys_iopl", /* 110 */
4615 "sys_vhangup",
4616 "sys_ni_syscall", /* old "idle" system call */
4617 "sys_vm86old",
4618 "sys_wait4",
4619 "sys_swapoff", /* 115 */
4620 "sys_sysinfo",
4621 "sys_ipc",
4622 "sys_fsync",
4623 "sys_sigreturn",
4624 "sys_clone", /* 120 */
4625 "sys_setdomainname",
4626 "sys_newuname",
4627 "sys_modify_ldt",
4628 "sys_adjtimex",
4629 "sys_mprotect", /* 125 */
4630 "sys_sigprocmask",
4631 "sys_ni_syscall", /* old "create_module" */
4632 "sys_init_module",
4633 "sys_delete_module",
4634 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4635 "sys_quotactl",
4636 "sys_getpgid",
4637 "sys_fchdir",
4638 "sys_bdflush",
4639 "sys_sysfs", /* 135 */
4640 "sys_personality",
4641 "sys_ni_syscall", /* reserved for afs_syscall */
4642 "sys_setfsuid16",
4643 "sys_setfsgid16",
4644 "sys_llseek", /* 140 */
4645 "sys_getdents",
4646 "sys_select",
4647 "sys_flock",
4648 "sys_msync",
4649 "sys_readv", /* 145 */
4650 "sys_writev",
4651 "sys_getsid",
4652 "sys_fdatasync",
4653 "sys_sysctl",
4654 "sys_mlock", /* 150 */
4655 "sys_munlock",
4656 "sys_mlockall",
4657 "sys_munlockall",
4658 "sys_sched_setparam",
4659 "sys_sched_getparam", /* 155 */
4660 "sys_sched_setscheduler",
4661 "sys_sched_getscheduler",
4662 "sys_sched_yield",
4663 "sys_sched_get_priority_max",
4664 "sys_sched_get_priority_min", /* 160 */
4665 "sys_sched_rr_get_interval",
4666 "sys_nanosleep",
4667 "sys_mremap",
4668 "sys_setresuid16",
4669 "sys_getresuid16", /* 165 */
4670 "sys_vm86",
4671 "sys_ni_syscall", /* Old sys_query_module */
4672 "sys_poll",
4673 "sys_nfsservctl",
4674 "sys_setresgid16", /* 170 */
4675 "sys_getresgid16",
4676 "sys_prctl",
4677 "sys_rt_sigreturn",
4678 "sys_rt_sigaction",
4679 "sys_rt_sigprocmask", /* 175 */
4680 "sys_rt_sigpending",
4681 "sys_rt_sigtimedwait",
4682 "sys_rt_sigqueueinfo",
4683 "sys_rt_sigsuspend",
4684 "sys_pread64", /* 180 */
4685 "sys_pwrite64",
4686 "sys_chown16",
4687 "sys_getcwd",
4688 "sys_capget",
4689 "sys_capset", /* 185 */
4690 "sys_sigaltstack",
4691 "sys_sendfile",
4692 "sys_ni_syscall", /* reserved for streams1 */
4693 "sys_ni_syscall", /* reserved for streams2 */
4694 "sys_vfork", /* 190 */
4695 "sys_getrlimit",
4696 "sys_mmap2",
4697 "sys_truncate64",
4698 "sys_ftruncate64",
4699 "sys_stat64", /* 195 */
4700 "sys_lstat64",
4701 "sys_fstat64",
4702 "sys_lchown",
4703 "sys_getuid",
4704 "sys_getgid", /* 200 */
4705 "sys_geteuid",
4706 "sys_getegid",
4707 "sys_setreuid",
4708 "sys_setregid",
4709 "sys_getgroups", /* 205 */
4710 "sys_setgroups",
4711 "sys_fchown",
4712 "sys_setresuid",
4713 "sys_getresuid",
4714 "sys_setresgid", /* 210 */
4715 "sys_getresgid",
4716 "sys_chown",
4717 "sys_setuid",
4718 "sys_setgid",
4719 "sys_setfsuid", /* 215 */
4720 "sys_setfsgid",
4721 "sys_pivot_root",
4722 "sys_mincore",
4723 "sys_madvise",
4724 "sys_getdents64", /* 220 */
4725 "sys_fcntl64",
4726 "sys_ni_syscall", /* reserved for TUX */
4727 "sys_ni_syscall",
4728 "sys_gettid",
4729 "sys_readahead", /* 225 */
4730 "sys_setxattr",
4731 "sys_lsetxattr",
4732 "sys_fsetxattr",
4733 "sys_getxattr",
4734 "sys_lgetxattr", /* 230 */
4735 "sys_fgetxattr",
4736 "sys_listxattr",
4737 "sys_llistxattr",
4738 "sys_flistxattr",
4739 "sys_removexattr", /* 235 */
4740 "sys_lremovexattr",
4741 "sys_fremovexattr",
4742 "sys_tkill",
4743 "sys_sendfile64",
4744 "sys_futex", /* 240 */
4745 "sys_sched_setaffinity",
4746 "sys_sched_getaffinity",
4747 "sys_set_thread_area",
4748 "sys_get_thread_area",
4749 "sys_io_setup", /* 245 */
4750 "sys_io_destroy",
4751 "sys_io_getevents",
4752 "sys_io_submit",
4753 "sys_io_cancel",
4754 "sys_fadvise64", /* 250 */
4755 "sys_ni_syscall",
4756 "sys_exit_group",
4757 "sys_lookup_dcookie",
4758 "sys_epoll_create",
4759 "sys_epoll_ctl", /* 255 */
4760 "sys_epoll_wait",
4761 "sys_remap_file_pages",
4762 "sys_set_tid_address",
4763 "sys_timer_create",
4764 "sys_timer_settime", /* 260 */
4765 "sys_timer_gettime",
4766 "sys_timer_getoverrun",
4767 "sys_timer_delete",
4768 "sys_clock_settime",
4769 "sys_clock_gettime", /* 265 */
4770 "sys_clock_getres",
4771 "sys_clock_nanosleep",
4772 "sys_statfs64",
4773 "sys_fstatfs64",
4774 "sys_tgkill", /* 270 */
4775 "sys_utimes",
4776 "sys_fadvise64_64",
4777 "sys_ni_syscall" /* sys_vserver */
4778 };
4779
4780 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
4781 switch (uEAX)
4782 {
4783 default:
4784 if (uEAX < RT_ELEMENTS(apsz))
4785 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4786 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
4787 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
4788 else
4789 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
4790 break;
4791
4792 }
4793}
4794
4795
4796/**
4797 * Dumps an OpenBSD system call.
4798 * @param pVCpu VMCPU handle.
4799 */
4800void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4801{
4802 static const char *apsz[] =
4803 {
4804 "SYS_syscall", //0
4805 "SYS_exit", //1
4806 "SYS_fork", //2
4807 "SYS_read", //3
4808 "SYS_write", //4
4809 "SYS_open", //5
4810 "SYS_close", //6
4811 "SYS_wait4", //7
4812 "SYS_8",
4813 "SYS_link", //9
4814 "SYS_unlink", //10
4815 "SYS_11",
4816 "SYS_chdir", //12
4817 "SYS_fchdir", //13
4818 "SYS_mknod", //14
4819 "SYS_chmod", //15
4820 "SYS_chown", //16
4821 "SYS_break", //17
4822 "SYS_18",
4823 "SYS_19",
4824 "SYS_getpid", //20
4825 "SYS_mount", //21
4826 "SYS_unmount", //22
4827 "SYS_setuid", //23
4828 "SYS_getuid", //24
4829 "SYS_geteuid", //25
4830 "SYS_ptrace", //26
4831 "SYS_recvmsg", //27
4832 "SYS_sendmsg", //28
4833 "SYS_recvfrom", //29
4834 "SYS_accept", //30
4835 "SYS_getpeername", //31
4836 "SYS_getsockname", //32
4837 "SYS_access", //33
4838 "SYS_chflags", //34
4839 "SYS_fchflags", //35
4840 "SYS_sync", //36
4841 "SYS_kill", //37
4842 "SYS_38",
4843 "SYS_getppid", //39
4844 "SYS_40",
4845 "SYS_dup", //41
4846 "SYS_opipe", //42
4847 "SYS_getegid", //43
4848 "SYS_profil", //44
4849 "SYS_ktrace", //45
4850 "SYS_sigaction", //46
4851 "SYS_getgid", //47
4852 "SYS_sigprocmask", //48
4853 "SYS_getlogin", //49
4854 "SYS_setlogin", //50
4855 "SYS_acct", //51
4856 "SYS_sigpending", //52
4857 "SYS_osigaltstack", //53
4858 "SYS_ioctl", //54
4859 "SYS_reboot", //55
4860 "SYS_revoke", //56
4861 "SYS_symlink", //57
4862 "SYS_readlink", //58
4863 "SYS_execve", //59
4864 "SYS_umask", //60
4865 "SYS_chroot", //61
4866 "SYS_62",
4867 "SYS_63",
4868 "SYS_64",
4869 "SYS_65",
4870 "SYS_vfork", //66
4871 "SYS_67",
4872 "SYS_68",
4873 "SYS_sbrk", //69
4874 "SYS_sstk", //70
4875 "SYS_61",
4876 "SYS_vadvise", //72
4877 "SYS_munmap", //73
4878 "SYS_mprotect", //74
4879 "SYS_madvise", //75
4880 "SYS_76",
4881 "SYS_77",
4882 "SYS_mincore", //78
4883 "SYS_getgroups", //79
4884 "SYS_setgroups", //80
4885 "SYS_getpgrp", //81
4886 "SYS_setpgid", //82
4887 "SYS_setitimer", //83
4888 "SYS_84",
4889 "SYS_85",
4890 "SYS_getitimer", //86
4891 "SYS_87",
4892 "SYS_88",
4893 "SYS_89",
4894 "SYS_dup2", //90
4895 "SYS_91",
4896 "SYS_fcntl", //92
4897 "SYS_select", //93
4898 "SYS_94",
4899 "SYS_fsync", //95
4900 "SYS_setpriority", //96
4901 "SYS_socket", //97
4902 "SYS_connect", //98
4903 "SYS_99",
4904 "SYS_getpriority", //100
4905 "SYS_101",
4906 "SYS_102",
4907 "SYS_sigreturn", //103
4908 "SYS_bind", //104
4909 "SYS_setsockopt", //105
4910 "SYS_listen", //106
4911 "SYS_107",
4912 "SYS_108",
4913 "SYS_109",
4914 "SYS_110",
4915 "SYS_sigsuspend", //111
4916 "SYS_112",
4917 "SYS_113",
4918 "SYS_114",
4919 "SYS_115",
4920 "SYS_gettimeofday", //116
4921 "SYS_getrusage", //117
4922 "SYS_getsockopt", //118
4923 "SYS_119",
4924 "SYS_readv", //120
4925 "SYS_writev", //121
4926 "SYS_settimeofday", //122
4927 "SYS_fchown", //123
4928 "SYS_fchmod", //124
4929 "SYS_125",
4930 "SYS_setreuid", //126
4931 "SYS_setregid", //127
4932 "SYS_rename", //128
4933 "SYS_129",
4934 "SYS_130",
4935 "SYS_flock", //131
4936 "SYS_mkfifo", //132
4937 "SYS_sendto", //133
4938 "SYS_shutdown", //134
4939 "SYS_socketpair", //135
4940 "SYS_mkdir", //136
4941 "SYS_rmdir", //137
4942 "SYS_utimes", //138
4943 "SYS_139",
4944 "SYS_adjtime", //140
4945 "SYS_141",
4946 "SYS_142",
4947 "SYS_143",
4948 "SYS_144",
4949 "SYS_145",
4950 "SYS_146",
4951 "SYS_setsid", //147
4952 "SYS_quotactl", //148
4953 "SYS_149",
4954 "SYS_150",
4955 "SYS_151",
4956 "SYS_152",
4957 "SYS_153",
4958 "SYS_154",
4959 "SYS_nfssvc", //155
4960 "SYS_156",
4961 "SYS_157",
4962 "SYS_158",
4963 "SYS_159",
4964 "SYS_160",
4965 "SYS_getfh", //161
4966 "SYS_162",
4967 "SYS_163",
4968 "SYS_164",
4969 "SYS_sysarch", //165
4970 "SYS_166",
4971 "SYS_167",
4972 "SYS_168",
4973 "SYS_169",
4974 "SYS_170",
4975 "SYS_171",
4976 "SYS_172",
4977 "SYS_pread", //173
4978 "SYS_pwrite", //174
4979 "SYS_175",
4980 "SYS_176",
4981 "SYS_177",
4982 "SYS_178",
4983 "SYS_179",
4984 "SYS_180",
4985 "SYS_setgid", //181
4986 "SYS_setegid", //182
4987 "SYS_seteuid", //183
4988 "SYS_lfs_bmapv", //184
4989 "SYS_lfs_markv", //185
4990 "SYS_lfs_segclean", //186
4991 "SYS_lfs_segwait", //187
4992 "SYS_188",
4993 "SYS_189",
4994 "SYS_190",
4995 "SYS_pathconf", //191
4996 "SYS_fpathconf", //192
4997 "SYS_swapctl", //193
4998 "SYS_getrlimit", //194
4999 "SYS_setrlimit", //195
5000 "SYS_getdirentries", //196
5001 "SYS_mmap", //197
5002 "SYS___syscall", //198
5003 "SYS_lseek", //199
5004 "SYS_truncate", //200
5005 "SYS_ftruncate", //201
5006 "SYS___sysctl", //202
5007 "SYS_mlock", //203
5008 "SYS_munlock", //204
5009 "SYS_205",
5010 "SYS_futimes", //206
5011 "SYS_getpgid", //207
5012 "SYS_xfspioctl", //208
5013 "SYS_209",
5014 "SYS_210",
5015 "SYS_211",
5016 "SYS_212",
5017 "SYS_213",
5018 "SYS_214",
5019 "SYS_215",
5020 "SYS_216",
5021 "SYS_217",
5022 "SYS_218",
5023 "SYS_219",
5024 "SYS_220",
5025 "SYS_semget", //221
5026 "SYS_222",
5027 "SYS_223",
5028 "SYS_224",
5029 "SYS_msgget", //225
5030 "SYS_msgsnd", //226
5031 "SYS_msgrcv", //227
5032 "SYS_shmat", //228
5033 "SYS_229",
5034 "SYS_shmdt", //230
5035 "SYS_231",
5036 "SYS_clock_gettime", //232
5037 "SYS_clock_settime", //233
5038 "SYS_clock_getres", //234
5039 "SYS_235",
5040 "SYS_236",
5041 "SYS_237",
5042 "SYS_238",
5043 "SYS_239",
5044 "SYS_nanosleep", //240
5045 "SYS_241",
5046 "SYS_242",
5047 "SYS_243",
5048 "SYS_244",
5049 "SYS_245",
5050 "SYS_246",
5051 "SYS_247",
5052 "SYS_248",
5053 "SYS_249",
5054 "SYS_minherit", //250
5055 "SYS_rfork", //251
5056 "SYS_poll", //252
5057 "SYS_issetugid", //253
5058 "SYS_lchown", //254
5059 "SYS_getsid", //255
5060 "SYS_msync", //256
5061 "SYS_257",
5062 "SYS_258",
5063 "SYS_259",
5064 "SYS_getfsstat", //260
5065 "SYS_statfs", //261
5066 "SYS_fstatfs", //262
5067 "SYS_pipe", //263
5068 "SYS_fhopen", //264
5069 "SYS_265",
5070 "SYS_fhstatfs", //266
5071 "SYS_preadv", //267
5072 "SYS_pwritev", //268
5073 "SYS_kqueue", //269
5074 "SYS_kevent", //270
5075 "SYS_mlockall", //271
5076 "SYS_munlockall", //272
5077 "SYS_getpeereid", //273
5078 "SYS_274",
5079 "SYS_275",
5080 "SYS_276",
5081 "SYS_277",
5082 "SYS_278",
5083 "SYS_279",
5084 "SYS_280",
5085 "SYS_getresuid", //281
5086 "SYS_setresuid", //282
5087 "SYS_getresgid", //283
5088 "SYS_setresgid", //284
5089 "SYS_285",
5090 "SYS_mquery", //286
5091 "SYS_closefrom", //287
5092 "SYS_sigaltstack", //288
5093 "SYS_shmget", //289
5094 "SYS_semop", //290
5095 "SYS_stat", //291
5096 "SYS_fstat", //292
5097 "SYS_lstat", //293
5098 "SYS_fhstat", //294
5099 "SYS___semctl", //295
5100 "SYS_shmctl", //296
5101 "SYS_msgctl", //297
5102 "SYS_MAXSYSCALL", //298
5103 //299
5104 //300
5105 };
5106 uint32_t uEAX;
5107 if (!LogIsEnabled())
5108 return;
5109 uEAX = CPUMGetGuestEAX(pVCpu);
5110 switch (uEAX)
5111 {
5112 default:
5113 if (uEAX < RT_ELEMENTS(apsz))
5114 {
5115 uint32_t au32Args[8] = {0};
5116 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5117 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5118 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5119 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5120 }
5121 else
5122 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5123 break;
5124 }
5125}
5126
5127
5128#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5129/**
5130 * The Dll main entry point (stub).
5131 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    /* No CRT to initialize in the IPRT_NO_CRT build; always report success
       regardless of the attach/detach reason. */
    return true;
}
5136
/**
 * Minimal memcpy replacement for the no-CRT Windows/x86 build.
 *
 * Plain byte-by-byte forward copy; per the standard memcpy contract the
 * regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (read only - keep the const qualifier).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* was dropping const - constraint violation */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5144
5145#endif
5146
/**
 * QEMU core callback invoked when the CPU's SMM state changes.
 * Intentionally a no-op in the VirtualBox recompiler build.
 *
 * @param   env     The CPU state (unused).
 */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette