VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@56290

Last change on this file since 56290 was 56048, checked in by vboxsync, 9 years ago

pgm.h,++: Changed the return type of PDMPhysRead, PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr to VBOXSTRICTRC, tracking down all the currently possible return values for future ring-0 and raw-mode context handlers. Prepared IEM and PGM for new r0+rc pfnHandlers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 184.6 KB
/* $Id: VBoxRecompiler.c 56048 2015-05-23 20:28:52Z vboxsync $ */
/** @file
 * VBox Recompiler - QEMU.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_REM
#include <stdio.h>      /* FILE */
#include "osdep.h"
#include "config.h"
#include "cpu.h"
#include "exec-all.h"
#include "ioport.h"

#include <VBox/vmm/rem.h>
#include <VBox/vmm/vmapi.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/dbg.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include "REMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/param.h>
#include <VBox/err.h>

#include <VBox/log.h>
#include <iprt/alloca.h>
#include <iprt/semaphore.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/thread.h>
#include <iprt/string.h>

/* Don't wanna include everything. */
extern void cpu_exec_init_all(uintptr_t tb_size);
extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
extern void tlb_flush(CPUX86State *env, int flush_global);
extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
extern void sync_ldtr(CPUX86State *env1, int selector);

#ifdef VBOX_STRICT
ram_addr_t get_phys_page_offset(target_ulong addr);
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/

/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
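
/* Usage sketch for the macro above (illustrative only -- the variable and
 * field names here are hypothetical, not taken from the actual state-sync
 * code further down in this file):
 */
#if 0
    for (unsigned iReg = 0; iReg < 8; iReg++)
        REM_COPY_FPU_REG(&pDstFpu->aRegs[iReg], &pSrcEnv->fpregs[iReg]);
#endif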

/** How remR3RunLoggingStep operates. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING


/** Selector flag shift between qemu and VBox.
 * VBox shifts the qemu bits to the right. */
#define SEL_FLAGS_SHIFT     (8)
/** Mask applied to the shifted qemu selector flags to get the attributes VBox
 * (VT-x) needs. */
#define SEL_FLAGS_SMASK     UINT32_C(0x1F0FF)
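
/* Usage sketch: this is how remR3CanExecuteRaw below recovers the VT-x
 * attribute format from a qemu hidden selector register:
 *     pCtx->cs.Attr.u = (env->segs[R_CS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
 */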


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static void     remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
static int      remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);

static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys);
static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys);
static void     remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);

static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
static void     remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
static void     remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);

static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/** @todo Move stats to REM::s some rainy day when we have nothing to do. */
#ifdef VBOX_WITH_STATISTICS
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMCOUNTER    gStatCpuGetTSC;
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
static STAMCOUNTER    gaStatRefuseStale[6];
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;

/*
 * Global stuff.
 */

/** MMIO read callbacks. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
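
/* The three entries in each callback table above are indexed by log2 of the
 * access size: [0] = byte, [1] = word, [2] = dword.  They are registered with
 * the recompiler like this (see REMR3Init below for the real calls):
 */
#if 0
    int iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, &pVM->rem.s.Env);
#endif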


#ifdef VBOX_WITH_DEBUGGER
/*
 * Debugger commands.
 */
static FNDBGCCMD remR3CmdDisasEnableStepping;

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0U, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable single stepping with logged disassembly. "
                          "If no arguments are given, the current state is shown."
    }
};
#endif
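
/* Example console usage of the command above (assuming the standard DBGC
 * command syntax; illustrative only):
 *     .remstep on      -- enable single stepping with logged disassembly
 *     .remstep off     -- disable it again
 *     .remstep         -- show the current state
 */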

/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * @todo huh??? That cannot be the case on the mac... So, this
 *       point is probably not valid any longer. */
uint8_t *code_gen_prologue;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/**
 * Initializes the REM.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t                u32Dummy;
    int                     rc;
    unsigned                i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if 0 /* just an annoyance at the moment. */
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, &pVM->rem.s.Env);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_DEBUGGER
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr", STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInhibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES", STAMUNIT_OCCURENCES, "Raw mode refused because of stale ES");
    STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale CS");
    STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale SS");
    STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale DS");
    STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale FS");
    STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale GS");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */
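
    /* Sketch of how a record is meant to come off the free list built above
     * (illustrative only; the real allocation code lives elsewhere in REM):
     */
#if 0
    uint32_t idxFree = pVM->rem.s.idxFreeList;
    if (idxFree != UINT32_MAX)
    {
        PREMHANDLERNOTIFICATION pFree = &pVM->rem.s.aHandlerNotifications[idxFree];
        pVM->rem.s.idxFreeList = pFree->idxNext;
        /* ... fill in pFree and link it onto the pending list ... */
    }
#endif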

    return rc;
}


/**
 * Finalizes the REM initialization.
 *
 * This is called after all components, devices and drivers have
 * been initialized. Its main purpose is to finish the RAM related
 * initialization.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 */
REMR3DECL(int) REMR3InitFinalize(PVM pVM)
{
    int rc;

    /*
     * Ram size & dirty bit map.
     */
    Assert(!pVM->rem.s.fGCPhysLastRamFixed);
    pVM->rem.s.fGCPhysLastRamFixed = true;
#ifdef RT_STRICT
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
#else
    rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
#endif
    return rc;
}

/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up to the nearest 4GB of RAM and leave at least 64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
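
/* Worked example of the guarded sizing above: with GCPhysLastRam = 4GB - 1 the
 * dirty map needs cb >> PAGE_SHIFT = 0x100000 bytes, which is already a
 * multiple of _4G >> PAGE_SHIFT (also 0x100000), so cbBitmapFull gets a whole
 * extra 0x100000 bytes, leaving 1MB of inaccessible guard pages after the
 * page-aligned bitmap.  For a 512MB guest (phys_dirty_size = 0x20000),
 * cbBitmapAligned = 0x20000 and cbBitmapFull = 0x100000, i.e. 896KB of guard,
 * comfortably over the 64KB minimum enforced by the else-if branch. */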


/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources; the VM
 * itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
    /*
     * Statistics.
     */
    STAMR3Deregister(pVM->pUVM, "/PROF/REM/*");
    STAMR3Deregister(pVM->pUVM, "/REM/*");

    return VINF_SUCCESS;
}


/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */

    /*
     * Reset the REM cpu.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;

    EMRemUnlock(pVM);
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0);       /* terminator */
}
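
/* On-disk layout of the "rem" saved-state unit written above (all uint32_t):
 *     hflags, ~0 separator, fRawRing0 flag, u32PendingInterrupt, ~0 terminator.
 * remR3Load below consumes exactly this sequence (plus legacy 1.6 extras). */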


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);    /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}



#undef  LOG_GROUP
#define LOG_GROUP LOG_GROUP_REM_RUN

/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enable single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, it has to be disabled before we start stepping.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves.
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HM:
                /** @todo is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
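
/* Typical call sequence, per the function comment above (a sketch, not
 * lifted from the actual EM code):
 */
#if 0
    rc = REMR3State(pVM, pVCpu);              /* sync VM -> REM */
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);           /* single step one instruction */
        int rc2 = REMR3StateBack(pVM, pVCpu); /* sync REM -> VM */
        AssertRC(rc2);
    }
#endif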


/**
 * Set a breakpoint using the REM facilities.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
    {
        LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
    return VERR_REM_NO_MORE_BP_SLOTS;
}


/**
 * Clears a breakpoint set by REMR3BreakpointSet().
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   Address     The breakpoint address.
 * @thread  The emulation thread.
 */
REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
{
    VM_ASSERT_EMT(pVM);
    if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
    {
        LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
        return VINF_SUCCESS;
    }
    LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
    return VERR_REM_BP_NOT_FOUND;
}


/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. It is intended to be called while in raw mode and
 * will take care of all the state syncing between REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HM makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HM;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint *pBP;
                    RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HM:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HM\n"));
                rc = VINF_EM_RESCHEDULE_HM;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
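
/* Summary of how the cpu_exec() exit codes map to VBox statuses in this file
 * (the EXCP_INTERRUPT and default cases vary slightly between callers):
 *     EXCP_SINGLE_INSTR     -> VINF_EM_RESCHEDULE
 *     EXCP_INTERRUPT        -> VINF_SUCCESS or VINF_EM_RESCHEDULE
 *     EXCP_DEBUG            -> VINF_EM_DBG_BREAKPOINT or VINF_EM_DBG_STEPPED
 *     EXCP_HLT, EXCP_HALTED -> VINF_EM_HALT
 *     EXCP_EXECUTE_RAW      -> VINF_EM_RESCHEDULE_RAW
 *     EXCP_EXECUTE_HM       -> VINF_EM_RESCHEDULE_HM
 *     EXCP_RC               -> pVM->rem.s.rc (which is then reset to VERR_INTERNAL_ERROR)
 */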


/**
 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The Virtual CPU handle.
 */
static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    Assert(pVM->rem.s.fInREM);
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
    cpu_single_step(&pVM->rem.s.Env, 1);
#else
    Assert(!pVM->rem.s.Env.singlestep_enabled);
#endif

    /*
     * Now we set the execute single instruction flag and enter the cpu_exec loop.
     */
    for (;;)
    {
        char szBuf[256];

        /*
         * Log the current registers state and instruction.
         */
        remR3StateUpdate(pVM, pVCpu);
        DBGFR3Info(pVM->pUVM, "cpumguest", NULL, NULL);
        szBuf[0] = '\0';
        rc = DBGFR3DisasInstrEx(pVM->pUVM,
                                pVCpu->idCpu,
                                0, /* Sel */  0, /* GCPtr */
                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                                szBuf,
                                sizeof(szBuf),
                                NULL);
        if (RT_FAILURE(rc))
            RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
        RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);

        /*
         * Execute the instruction.
         */
        TMNotifyStartOfExecution(pVCpu);

        if (   pVM->rem.s.Env.exception_index < 0
            || pVM->rem.s.Env.exception_index > 256)
            pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        pVM->rem.s.Env.interrupt_request = 0;
#else
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
#endif
        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
            || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
            pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
        RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n",
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        rc = cpu_exec(&pVM->rem.s.Env);

        RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
                    pVM->rem.s.Env.interrupt_request,
                    pVM->rem.s.Env.halted,
                    pVM->rem.s.Env.exception_index
                    );

        TMNotifyEndOfExecution(pVCpu);

        switch (rc)
        {
#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
            /*
             * The normal exit.
             */
            case EXCP_SINGLE_INSTR:
                if (   !VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
                    && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
                    continue;
                RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
                            pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
                rc = VINF_SUCCESS;
                break;

#else
            /*
             * The normal exit, check for breakpoints at PC just to be sure.
             */
#endif
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    CPUBreakpoint *pBP;
                    RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
                if (rc == VINF_EM_DBG_STEPPED)
                {
                    if (   !VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
                        && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
                        continue;

                    RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
                                pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
                    rc = VINF_SUCCESS;
                }
#endif
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
                rc = VINF_SUCCESS;
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HM:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HM rc=VINF_EM_RESCHEDULE_HM\n");
                rc = VINF_EM_RESCHEDULE_HM;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
                rc = VINF_EM_RESCHEDULE;
                break;
        }
        break;
    }

#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
//    cpu_single_step(&pVM->rem.s.Env, 0);
#else
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
#endif
    return rc;
}


/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                CPUBreakpoint *pBP;
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW pc=%RGv\n", pVM->rem.s.Env.eip));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HM:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HM\n"));
            rc = VINF_EM_RESCHEDULE_HM;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}


/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
 *
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context.
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

#ifdef IEM_VERIFICATION_MODE
    return false;
#endif

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* Never when single stepping+logging guest code. */
    if (env->state & CPU_EMULATE_SINGLE_STEP)
        return false;

    if (HMIsEnabled(env->pVM))
    {
#ifdef RT_OS_WINDOWS
        PCPUMCTX pCtx = alloca(sizeof(*pCtx));
#else
        CPUMCTX Ctx;
        PCPUMCTX pCtx = &Ctx;
#endif

        env->state |= CPU_RAW_HM;

        /*
         * The simple check first...
         */
        if (!EMIsHwVirtExecutionEnabled(env->pVM))
            return false;

        /*
         * Create partial context for HMR3CanExecuteGuest.
         */
        pCtx->cr0 = env->cr[0];
        pCtx->cr3 = env->cr[3];
        pCtx->cr4 = env->cr[4];

        pCtx->tr.Sel      = env->tr.selector;
        pCtx->tr.ValidSel = env->tr.selector;
        pCtx->tr.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->tr.u64Base  = env->tr.base;
        pCtx->tr.u32Limit = env->tr.limit;
        pCtx->tr.Attr.u   = (env->tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->ldtr.Sel      = env->ldt.selector;
        pCtx->ldtr.ValidSel = env->ldt.selector;
        pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->ldtr.u64Base  = env->ldt.base;
        pCtx->ldtr.u32Limit = env->ldt.limit;
        pCtx->ldtr.Attr.u   = (env->ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->idtr.cbIdt = env->idt.limit;
        pCtx->idtr.pIdt  = env->idt.base;

        pCtx->gdtr.cbGdt = env->gdt.limit;
        pCtx->gdtr.pGdt  = env->gdt.base;

        pCtx->rsp = env->regs[R_ESP];
        pCtx->rip = env->eip;

        pCtx->eflags.u32 = env->eflags;

        pCtx->cs.Sel      = env->segs[R_CS].selector;
        pCtx->cs.ValidSel = env->segs[R_CS].selector;
        pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->cs.u64Base  = env->segs[R_CS].base;
        pCtx->cs.u32Limit = env->segs[R_CS].limit;
        pCtx->cs.Attr.u   = (env->segs[R_CS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->ds.Sel      = env->segs[R_DS].selector;
        pCtx->ds.ValidSel = env->segs[R_DS].selector;
        pCtx->ds.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->ds.u64Base  = env->segs[R_DS].base;
        pCtx->ds.u32Limit = env->segs[R_DS].limit;
        pCtx->ds.Attr.u   = (env->segs[R_DS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->es.Sel      = env->segs[R_ES].selector;
        pCtx->es.ValidSel = env->segs[R_ES].selector;
        pCtx->es.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->es.u64Base  = env->segs[R_ES].base;
        pCtx->es.u32Limit = env->segs[R_ES].limit;
        pCtx->es.Attr.u   = (env->segs[R_ES].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->fs.Sel      = env->segs[R_FS].selector;
        pCtx->fs.ValidSel = env->segs[R_FS].selector;
        pCtx->fs.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->fs.u64Base  = env->segs[R_FS].base;
        pCtx->fs.u32Limit = env->segs[R_FS].limit;
        pCtx->fs.Attr.u   = (env->segs[R_FS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->gs.Sel      = env->segs[R_GS].selector;
        pCtx->gs.ValidSel = env->segs[R_GS].selector;
        pCtx->gs.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->gs.u64Base  = env->segs[R_GS].base;
        pCtx->gs.u32Limit = env->segs[R_GS].limit;
        pCtx->gs.Attr.u   = (env->segs[R_GS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->ss.Sel      = env->segs[R_SS].selector;
        pCtx->ss.ValidSel = env->segs[R_SS].selector;
        pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->ss.u64Base  = env->segs[R_SS].base;
        pCtx->ss.u32Limit = env->segs[R_SS].limit;
        pCtx->ss.Attr.u   = (env->segs[R_SS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

        pCtx->msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bit protected mode code, with paging enabled, is allowed here.
         */
        if (HMR3CanExecuteGuest(env->pVM, pCtx) == true)
        {
            *piException = EXCP_EXECUTE_HM;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16- and 32-bit protected mode ring-3 code that has no IO privileges,
     * or 32-bit protected mode ring-0 code.
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK)
    {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->breakpoints))
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    if (!QTAILQ_EMPTY(&env->watchpoints))
    {
        //Log2(("raw mode refused: Watchpoints\n"));
        return false;
    }

    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        if (EMIsRawRing1Enabled(env->pVM))
        {
            /* Only ring 0 and 1 supervisor code. */
            if (((fFlags >> HF_CPL_SHIFT) & 3) == 2)    /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
            {
                Log2(("raw r0 mode refused: CPL %d\n", (fFlags >> HF_CPL_SHIFT) & 3));
                return false;
            }
        }
        /* Only R0. */
        else if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

#ifdef VBOX_WITH_RAW_MODE
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }
#endif

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

#ifndef VBOX_WITH_RAW_RING1
        if (((env->eflags >> IOPL_SHIFT) & 3) != 0)
        {
            Log2(("raw r0 mode refused: IOPL %d\n", ((env->eflags >> IOPL_SHIFT) & 3)));
            return false;
        }
#endif
        env->state |= CPU_RAW_RING0;
    }

1645 /*
1646 * Don't reschedule the first time we're called, because there might be
1647 * special reasons why we're here that are not covered by the above checks.
1648 */
1649 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1650 {
1651 Log2(("raw mode refused: first scheduling\n"));
1652 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1653 return false;
1654 }
1655
1656 /*
1657 * Stale hidden selectors mean raw-mode is unsafe (we're being very careful here).
1658 */
1659 if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1660 {
1661 Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
1662 STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
1663 return false;
1664 }
1665 if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1666 {
1667 Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
1668 STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
1669 return false;
1670 }
1671 if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1672 {
1673 Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
1674 STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
1675 return false;
1676 }
1677 if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1678 {
1679 Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
1680 STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
1681 return false;
1682 }
1683 if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1684 {
1685 Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
1686 STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
1687 return false;
1688 }
1689 if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1690 {
1691 Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
1692 STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
1693 return false;
1694 }
1695
1696/* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
1697 *piException = EXCP_EXECUTE_RAW;
1698 return true;
1699}
1700
1701
1702#ifdef VBOX_WITH_RAW_MODE
1703/**
1704 * Fetches a code byte.
1705 *
1706 * @returns Success indicator (bool) for ease of use.
1707 * @param env The CPU environment structure.
1708 * @param GCPtrInstr Where to fetch code.
1709 * @param pu8Byte Where to store the byte on success.
1710 */
1711bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1712{
1713 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1714 if (RT_SUCCESS(rc))
1715 return true;
1716 return false;
1717}
1718#endif /* VBOX_WITH_RAW_MODE */
1719
1720
1721/**
1722 * Flush (or invalidate, if you like) a page table/directory entry.
1723 *
1724 * (invlpg instruction; tlb_flush_page)
1725 *
1726 * @param env Pointer to cpu environment.
1727 * @param GCPtr The virtual address whose page table/directory entry should be invalidated.
1728 */
1729void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1730{
1731 PVM pVM = env->pVM;
1732 PCPUMCTX pCtx;
1733 int rc;
1734
1735 Assert(EMRemIsLockOwner(env->pVM));
1736
1737 /*
1738 * When we're replaying invlpg instructions or restoring a saved
1739 * state we disable this path.
1740 */
1741 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1742 return;
1743 LogFlow(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1744 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1745
1746 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1747
1748 /*
1749 * Update the control registers before calling PGMFlushPage.
1750 */
1751 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1752 Assert(pCtx);
1753 pCtx->cr0 = env->cr[0];
1754 pCtx->cr3 = env->cr[3];
1755#ifdef VBOX_WITH_RAW_MODE
1756 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1757 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1758#endif
1759 pCtx->cr4 = env->cr[4];
1760
1761 /*
1762 * Let PGM do the rest.
1763 */
1764 Assert(env->pVCpu);
1765 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1766 if (RT_FAILURE(rc))
1767 {
1768 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1769 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1770 }
1771 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1772}
1773
1774
1775#ifndef REM_PHYS_ADDR_IN_TLB
1776/** Wrapper for PGMR3PhysTlbGCPhys2Ptr, returning status in the two low bits of the pointer (see the decoding sketch below). */
1777void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
1778{
1779 void *pv;
1780 int rc;
1781
1782
1783 /* Address must be aligned enough to fiddle with lower bits */
1784 Assert((physAddr & 0x3) == 0);
1785 /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/
1786
1787 STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
1788 rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
1789 STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
1790 Assert( rc == VINF_SUCCESS
1791 || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
1792 || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
1793 || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
1794 if (RT_FAILURE(rc))
1795 return (void *)1;
1796 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
1797 return (void *)((uintptr_t)pv | 2);
1798 return pv;
1799}
1800#endif /* REM_PHYS_ADDR_IN_TLB */
1801
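/*
 * Illustrative sketch (not part of the build): how a caller might decode the
 * tagged pointer remR3TlbGCPhys2Ptr returns above.  Bit 0 signals a failed
 * lookup and bit 1 a write-monitored page; both fit in the low bits that the
 * 4-byte alignment assertion keeps free.  The helper below is hypothetical.
 *
 * @code
 * static bool remR3ExampleDecodeTlbPtr(void *pvTagged, void **ppv, bool *pfCatchWrite)
 * {
 *     uintptr_t uTagged = (uintptr_t)pvTagged;
 *     if (uTagged & 1)                        // lookup failed (VERR_PGM_PHYS_TLB_*)
 *         return false;
 *     *pfCatchWrite = RT_BOOL(uTagged & 2);   // VINF_PGM_PHYS_TLB_CATCH_WRITE
 *     *ppv = (void *)(uTagged & ~(uintptr_t)3);
 *     return true;
 * }
 * @endcode
 */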
1802
1803/**
1804 * Called from tlb_protect_code in order to write-monitor a code page.
1805 *
1806 * @param env Pointer to the CPU environment.
1807 * @param GCPtr Code page to monitor
1808 */
1809void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1810{
1811#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1812 Assert(env->pVM->rem.s.fInREM);
1813 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1814 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1815 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1816 && !(env->eflags & VM_MASK) /* no V86 mode */
1817 && !HMIsEnabled(env->pVM))
1818 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1819#endif
1820}
1821
1822
1823/**
1824 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1825 *
1826 * @param env Pointer to the CPU environment.
1827 * @param GCPtr Code page to monitor
1828 */
1829void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1830{
1831 Assert(env->pVM->rem.s.fInREM);
1832#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1833 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1834 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1835 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1836 && !(env->eflags & VM_MASK) /* no V86 mode */
1837 && !HMIsEnabled(env->pVM))
1838 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1839#endif
1840}
1841
1842
1843/**
1844 * Called when the CPU is initialized, when any of the CRx registers is
1845 * changed, or when the A20 line is modified.
1846 *
1847 * @param env Pointer to the CPU environment.
1848 * @param fGlobal Set if the flush is global.
1849 */
1850void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1851{
1852 PVM pVM = env->pVM;
1853 PCPUMCTX pCtx;
1854 Assert(EMRemIsLockOwner(pVM));
1855
1856 /*
1857 * When we're replaying invlpg instructions or restoring a saved
1858 * state we disable this path.
1859 */
1860 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1861 return;
1862 Assert(pVM->rem.s.fInREM);
1863
1864 /*
1865 * The caller doesn't check cr4, so we have to do that for ourselves.
1866 */
1867 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1868 fGlobal = true;
1869 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1870
1871 /*
1872 * Update the control registers before calling PGMR3FlushTLB.
1873 */
1874 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1875 Assert(pCtx);
1876 pCtx->cr0 = env->cr[0];
1877 pCtx->cr3 = env->cr[3];
1878#ifdef VBOX_WITH_RAW_MODE
1879 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1880 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1881#endif
1882 pCtx->cr4 = env->cr[4];
1883
1884 /*
1885 * Let PGM do the rest.
1886 */
1887 Assert(env->pVCpu);
1888 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1889}
1890
1891
1892/**
1893 * Called when any of the cr0, cr4 or efer registers is updated.
1894 *
1895 * @param env Pointer to the CPU environment.
1896 */
1897void remR3ChangeCpuMode(CPUX86State *env)
1898{
1899 PVM pVM = env->pVM;
1900 uint64_t efer;
1901 PCPUMCTX pCtx;
1902 int rc;
1903
1904 /*
1905 * When we're replaying loads or restoring a saved
1906 * state this path is disabled.
1907 */
1908 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1909 return;
1910 Assert(pVM->rem.s.fInREM);
1911
1912 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1913 Assert(pCtx);
1914
1915 /*
1916 * Notify PGM about WP0 being enabled (like CPUSetGuestCR0 does).
1917 */
1918 if (((env->cr[0] ^ pCtx->cr0) & X86_CR0_WP) && (env->cr[0] & X86_CR0_WP))
1919 PGMCr0WpEnabled(env->pVCpu);
1920
1921 /*
1922 * Update the control registers before calling PGMChangeMode()
1923 * as it may need to map whatever cr3 is pointing to.
1924 */
1925 pCtx->cr0 = env->cr[0];
1926 pCtx->cr3 = env->cr[3];
1927#ifdef VBOX_WITH_RAW_MODE
1928 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1929 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1930#endif
1931 pCtx->cr4 = env->cr[4];
1932#ifdef TARGET_X86_64
1933 efer = env->efer;
1934 pCtx->msrEFER = efer;
1935#else
1936 efer = 0;
1937#endif
1938 Assert(env->pVCpu);
1939 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1940 if (rc != VINF_SUCCESS)
1941 {
1942 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1943 {
1944 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1945 remR3RaiseRC(env->pVM, rc);
1946 }
1947 else
1948 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1949 }
1950}
1951
1952
1953/**
1954 * Called from compiled code to run DMA.
1955 *
1956 * @param env Pointer to the CPU environment.
1957 */
1958void remR3DmaRun(CPUX86State *env)
1959{
1960 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1961 PDMR3DmaRun(env->pVM);
1962 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1963}
1964
1965
1966/**
1967 * Called from compiled code to schedule pending timers in the VMM.
1968 *
1969 * @param env Pointer to the CPU environment.
1970 */
1971void remR3TimersRun(CPUX86State *env)
1972{
1973 LogFlow(("remR3TimersRun:\n"));
1974 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1975 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1976 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1977 TMR3TimerQueuesDo(env->pVM);
1978 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1979 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1980}
1981
1982
1983/**
1984 * Record a trap occurrence.
1985 *
1986 * @returns VBox status code
1987 * @param env Pointer to the CPU environment.
1988 * @param uTrap Trap nr
1989 * @param uErrorCode Error code
1990 * @param pvNextEIP Next EIP
1991 */
1992int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1993{
1994 PVM pVM = env->pVM;
1995#ifdef VBOX_WITH_STATISTICS
1996 static STAMCOUNTER s_aStatTrap[255];
1997 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1998#endif
1999
2000#ifdef VBOX_WITH_STATISTICS
2001 if (uTrap < 255)
2002 {
2003 if (!s_aRegisters[uTrap])
2004 {
2005 char szStatName[64];
2006 s_aRegisters[uTrap] = true;
2007 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
2008 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
2009 }
2010 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
2011 }
2012#endif
2013 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2014 if ( uTrap < 0x20
2015 && (env->cr[0] & X86_CR0_PE)
2016 && !(env->eflags & X86_EFL_VM))
2017 {
2018#ifdef DEBUG
2019 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
2020#endif
2021 if (pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
2022 {
2023 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2024 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
2025 return VERR_REM_TOO_MANY_TRAPS;
2026 }
2027 if (pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
2028 {
2029 Log(("remR3NotifyTrap: uTrap=%#x set as pending\n", uTrap));
2030 pVM->rem.s.cPendingExceptions = 1;
2031 }
2032 pVM->rem.s.uPendingException = uTrap;
2033 pVM->rem.s.uPendingExcptEIP = env->eip;
2034 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2035 }
2036 else
2037 {
2038 pVM->rem.s.cPendingExceptions = 0;
2039 pVM->rem.s.uPendingException = uTrap;
2040 pVM->rem.s.uPendingExcptEIP = env->eip;
2041 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2042 }
2043 return VINF_SUCCESS;
2044}
2045
2046
2047/**
2048 * Clear the current active trap.
2049 *
2050 * @param pVM VM Handle.
2051 */
2052void remR3TrapClear(PVM pVM)
2053{
2054 pVM->rem.s.cPendingExceptions = 0;
2055 pVM->rem.s.uPendingException = 0;
2056 pVM->rem.s.uPendingExcptEIP = 0;
2057 pVM->rem.s.uPendingExcptCR2 = 0;
2058}
2059
2060
2061/**
2062 * Record previous call instruction addresses.
2063 *
2064 * @param env Pointer to the CPU environment.
2065 */
2066void remR3RecordCall(CPUX86State *env)
2067{
2068#ifdef VBOX_WITH_RAW_MODE
2069 CSAMR3RecordCallAddress(env->pVM, env->eip);
2070#endif
2071}
2072
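/*
 * Illustrative sketch (not part of the build): the round trip intended for
 * the two state-syncing APIs below, as a hypothetical EM-style caller would
 * drive it.  FF checks and error handling are elided; REMR3Run is the
 * execution API this file pairs with REMR3State/REMR3StateBack.
 *
 * @code
 * int rc = REMR3State(pVM, pVCpu);            // VM -> REM, once per round
 * if (RT_SUCCESS(rc))
 * {
 *     rc = REMR3Run(pVM, pVCpu);              // execute recompiled code
 *     int rc2 = REMR3StateBack(pVM, pVCpu);   // REM -> VM, exactly once
 *     AssertRC(rc2);
 * }
 * @endcode
 */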
2073
2074/**
2075 * Syncs the internal REM state with the VM.
2076 *
2077 * This must be called before REMR3Run() is invoked whenever the REM
2078 * state is not up to date. Calling it several times in a row is not
2079 * permitted.
2080 *
2081 * @returns VBox status code.
2082 *
2083 * @param pVM VM Handle.
2084 * @param pVCpu VMCPU Handle.
2085 *
2086 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2087 * not do this since the majority of the callers don't want any unnecessary
2088 * events pending that would immediately interrupt execution.
2089 */
2090REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2091{
2092 register const CPUMCTX *pCtx;
2093 register unsigned fFlags;
2094 unsigned i;
2095 TRPMEVENT enmType;
2096 uint8_t u8TrapNo;
2097 uint32_t uCpl;
2098 int rc;
2099
2100 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2101 Log2(("REMR3State:\n"));
2102
2103 pVM->rem.s.Env.pVCpu = pVCpu;
2104 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2105
2106 Assert(!pVM->rem.s.fInREM);
2107 pVM->rem.s.fInStateSync = true;
2108
2109 /*
2110 * If we have to flush TBs, do that immediately.
2111 */
2112 if (pVM->rem.s.fFlushTBs)
2113 {
2114 STAM_COUNTER_INC(&gStatFlushTBs);
2115 tb_flush(&pVM->rem.s.Env);
2116 pVM->rem.s.fFlushTBs = false;
2117 }
2118
2119 /*
2120 * Copy the registers which require no special handling.
2121 */
2122#ifdef TARGET_X86_64
2123 /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and are undefined after a mode change. */
2124 Assert(R_EAX == 0);
2125 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2126 Assert(R_ECX == 1);
2127 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2128 Assert(R_EDX == 2);
2129 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2130 Assert(R_EBX == 3);
2131 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2132 Assert(R_ESP == 4);
2133 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2134 Assert(R_EBP == 5);
2135 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2136 Assert(R_ESI == 6);
2137 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2138 Assert(R_EDI == 7);
2139 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2140 pVM->rem.s.Env.regs[8] = pCtx->r8;
2141 pVM->rem.s.Env.regs[9] = pCtx->r9;
2142 pVM->rem.s.Env.regs[10] = pCtx->r10;
2143 pVM->rem.s.Env.regs[11] = pCtx->r11;
2144 pVM->rem.s.Env.regs[12] = pCtx->r12;
2145 pVM->rem.s.Env.regs[13] = pCtx->r13;
2146 pVM->rem.s.Env.regs[14] = pCtx->r14;
2147 pVM->rem.s.Env.regs[15] = pCtx->r15;
2148
2149 pVM->rem.s.Env.eip = pCtx->rip;
2150
2151 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2152#else
2153 Assert(R_EAX == 0);
2154 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2155 Assert(R_ECX == 1);
2156 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2157 Assert(R_EDX == 2);
2158 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2159 Assert(R_EBX == 3);
2160 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2161 Assert(R_ESP == 4);
2162 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2163 Assert(R_EBP == 5);
2164 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2165 Assert(R_ESI == 6);
2166 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2167 Assert(R_EDI == 7);
2168 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2169 pVM->rem.s.Env.eip = pCtx->eip;
2170
2171 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2172#endif
2173
2174 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2175
2176 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2177 for (i = 0; i < 8; i++)
2178 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2179
2180#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2181 /*
2182 * Clear the halted hidden flag (the interrupt waking up the CPU can
2183 * have been dispatched in raw mode).
2184 */
2185 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2186#endif
2187
2188 /*
2189 * Replay invlpg? Only if we're not flushing the TLB.
2190 */
2191 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2192 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2193 if (pVM->rem.s.cInvalidatedPages)
2194 {
2195 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
2196 {
2197 RTUINT i;
2198
2199 pVM->rem.s.fIgnoreCR3Load = true;
2200 pVM->rem.s.fIgnoreInvlPg = true;
2201 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2202 {
2203 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2204 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2205 }
2206 pVM->rem.s.fIgnoreInvlPg = false;
2207 pVM->rem.s.fIgnoreCR3Load = false;
2208 }
2209 pVM->rem.s.cInvalidatedPages = 0;
2210 }
2211
2212 /* Replay notification changes. */
2213 REMR3ReplayHandlerNotifications(pVM);
2214
2215 /* Update MSRs; before CRx registers! */
2216 pVM->rem.s.Env.efer = pCtx->msrEFER;
2217 pVM->rem.s.Env.star = pCtx->msrSTAR;
2218 pVM->rem.s.Env.pat = pCtx->msrPAT;
2219#ifdef TARGET_X86_64
2220 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2221 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2222 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2223 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2224
2225 /* Update the internal long mode activate flag according to the new EFER value. */
2226 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2227 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2228 else
2229 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2230#endif
2231
2232 /* Update the inhibit IRQ mask. */
2233 pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
2234 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2235 {
2236 RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
2237 if (InhibitPC == pCtx->rip)
2238 pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
2239 else
2240 {
2241 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
2242 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2243 }
2244 }
2245
2246 /* Update the inhibit NMI mask. */
2247 pVM->rem.s.Env.hflags2 &= ~HF2_NMI_MASK;
2248 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2249 pVM->rem.s.Env.hflags2 |= HF2_NMI_MASK;
2250
2251 /*
2252 * Sync the A20 gate.
2253 */
2254 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2255 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2256 {
2257 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2258 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2259 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2260 }
2261
2262 /*
2263 * Registers which are rarely changed and require special handling / order when changed.
2264 */
2265 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2266 | CPUM_CHANGED_CR4
2267 | CPUM_CHANGED_CR0
2268 | CPUM_CHANGED_CR3
2269 | CPUM_CHANGED_GDTR
2270 | CPUM_CHANGED_IDTR
2271 | CPUM_CHANGED_SYSENTER_MSR
2272 | CPUM_CHANGED_LDTR
2273 | CPUM_CHANGED_CPUID
2274 | CPUM_CHANGED_FPU_REM
2275 )
2276 )
2277 {
2278 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2279 {
2280 pVM->rem.s.fIgnoreCR3Load = true;
2281 tlb_flush(&pVM->rem.s.Env, true);
2282 pVM->rem.s.fIgnoreCR3Load = false;
2283 }
2284
2285 /* CR4 before CR0! */
2286 if (fFlags & CPUM_CHANGED_CR4)
2287 {
2288 pVM->rem.s.fIgnoreCR3Load = true;
2289 pVM->rem.s.fIgnoreCpuMode = true;
2290 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2291 pVM->rem.s.fIgnoreCpuMode = false;
2292 pVM->rem.s.fIgnoreCR3Load = false;
2293 }
2294
2295 if (fFlags & CPUM_CHANGED_CR0)
2296 {
2297 pVM->rem.s.fIgnoreCR3Load = true;
2298 pVM->rem.s.fIgnoreCpuMode = true;
2299 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2300 pVM->rem.s.fIgnoreCpuMode = false;
2301 pVM->rem.s.fIgnoreCR3Load = false;
2302 }
2303
2304 if (fFlags & CPUM_CHANGED_CR3)
2305 {
2306 pVM->rem.s.fIgnoreCR3Load = true;
2307 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2308 pVM->rem.s.fIgnoreCR3Load = false;
2309 }
2310
2311 if (fFlags & CPUM_CHANGED_GDTR)
2312 {
2313 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2314 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2315 }
2316
2317 if (fFlags & CPUM_CHANGED_IDTR)
2318 {
2319 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2320 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2321 }
2322
2323 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2324 {
2325 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2326 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2327 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2328 }
2329
2330 if (fFlags & CPUM_CHANGED_LDTR)
2331 {
2332 if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2333 {
2334 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel;
2335 pVM->rem.s.Env.ldt.newselector = 0;
2336 pVM->rem.s.Env.ldt.fVBoxFlags = pCtx->ldtr.fFlags;
2337 pVM->rem.s.Env.ldt.base = pCtx->ldtr.u64Base;
2338 pVM->rem.s.Env.ldt.limit = pCtx->ldtr.u32Limit;
2339 pVM->rem.s.Env.ldt.flags = (pCtx->ldtr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
2340 }
2341 else
2342 {
2343 AssertFailed(); /* Shouldn't happen, see cpumR3LoadExec. */
2344 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
2345 }
2346 }
2347
2348 if (fFlags & CPUM_CHANGED_CPUID)
2349 {
2350 uint32_t u32Dummy;
2351
2352 /*
2353 * Get the CPUID features.
2354 */
2355 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2356 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2357 }
2358
2359 /* Sync FPU state after CR4, CPUID and EFER (!). */
2360 if (fFlags & CPUM_CHANGED_FPU_REM)
2361 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87); /* 'save' is an excellent name. */
2362 }
2363
2364 /*
2365 * Sync TR unconditionally to make life simpler.
2366 */
2367 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel;
2368 pVM->rem.s.Env.tr.newselector = 0;
2369 pVM->rem.s.Env.tr.fVBoxFlags = pCtx->tr.fFlags;
2370 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base;
2371 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit;
2372 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
2373 /* Note! do_interrupt will fault if the busy flag is still set... */ /** @todo so fix do_interrupt then! */
2374 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2375
2376 /*
2377 * Update selector registers.
2378 *
2379 * This must be done *after* we've synced gdt, ldt and crX registers
2380 * since we're reading the GDT/LDT in sync_seg. This will happen with
2381 * saved state which takes a quick dip into rawmode for instance.
2382 *
2383 * CPL/Stack; Note first check this one as the CPL might have changed.
2384 * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
2385 */
2386 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2387 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2388#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
2389 do \
2390 { \
2391 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
2392 { \
2393 cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
2394 (a_pVBoxSReg)->Sel, \
2395 (a_pVBoxSReg)->u64Base, \
2396 (a_pVBoxSReg)->u32Limit, \
2397 ((a_pVBoxSReg)->Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT); \
2398 (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
2399 } \
2400 /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \
2401 else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
2402 { \
2403 Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
2404 (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
2405 sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
2406 if ((a_pRemSReg)->newselector) \
2407 STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
2408 } \
2409 else \
2410 (a_pRemSReg)->newselector = 0; \
2411 } while (0)
2412
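    /*
     * Worked example for the attribute conversion above (assuming the
     * selector flag shift is 8, as the SEL_FLAGS_SHIFT name suggests): a
     * flat 32-bit code segment with VBox attributes 0xc09b is handed to
     * QEmu as (0xc09b & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT = 0x00c09b00,
     * i.e. the attribute word moves up into the position it occupies in
     * the second dword of a descriptor.
     */
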
2413 SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
2414 SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
2415 SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
2416 SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
2417 SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
2418 SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
2419 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2420 * be the same but not the base/limit. */
2421
2422 /*
2423 * Check for traps.
2424 */
2425 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2426 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2427 if (RT_SUCCESS(rc))
2428 {
2429#ifdef DEBUG
2430 if (u8TrapNo == 0x80)
2431 {
2432 remR3DumpLnxSyscall(pVCpu);
2433 remR3DumpOBsdSyscall(pVCpu);
2434 }
2435#endif
2436
2437 pVM->rem.s.Env.exception_index = u8TrapNo;
2438 if (enmType != TRPM_SOFTWARE_INT)
2439 {
2440 pVM->rem.s.Env.exception_is_int = 0;
2441#ifdef IEM_VERIFICATION_MODE /* Ugly hack, needs proper fixing. */
2442 pVM->rem.s.Env.exception_is_int = enmType == TRPM_HARDWARE_INT ? 0x42 : 0;
2443#endif
2444 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2445 }
2446 else
2447 {
2448 /*
2449 * There are two 1-byte opcodes and one 2-byte opcode for software interrupts.
2450 * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
2451 * for int3 and into.
2452 */
2453 pVM->rem.s.Env.exception_is_int = 1;
2454 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2455 /* int 3 may be generated by one-byte 0xcc */
2456 if (u8TrapNo == 3)
2457 {
2458 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2459 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2460 }
2461 /* int 4 may be generated by one-byte 0xce */
2462 else if (u8TrapNo == 4)
2463 {
2464 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2465 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2466 }
2467 }
2468
2469 /* get error code and cr2 if needed. */
2470 if (enmType == TRPM_TRAP)
2471 {
2472 switch (u8TrapNo)
2473 {
2474 case X86_XCPT_PF:
2475 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2476 /* fallthru */
2477 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2478 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2479 break;
2480
2481 case X86_XCPT_AC: case X86_XCPT_DF:
2482 default:
2483 pVM->rem.s.Env.error_code = 0;
2484 break;
2485 }
2486 }
2487 else
2488 pVM->rem.s.Env.error_code = 0;
2489
2490 /*
2491 * We can now reset the active trap since the recompiler is gonna have a go at it.
2492 */
2493 rc = TRPMResetTrap(pVCpu);
2494 AssertRC(rc);
2495 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2496 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2497 }
2498
2499 /*
2500 * Clear old interrupt request flags; Check for pending hardware interrupts.
2501 * (See @remark for why we don't check for other FFs.)
2502 */
2503 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2504 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2505 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2506 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2507
2508 /*
2509 * We're now in REM mode.
2510 */
2511 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2512 pVM->rem.s.fInREM = true;
2513 pVM->rem.s.fInStateSync = false;
2514 pVM->rem.s.cCanExecuteRaw = 0;
2515 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2516 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2517 return VINF_SUCCESS;
2518}
2519
2520
2521/**
2522 * Syncs back changes in the REM state to the VM state.
2523 *
2524 * This must be called after invoking REMR3Run().
2525 * Calling it several times in a row is not permitted.
2526 *
2527 * @returns VBox status code.
2528 *
2529 * @param pVM VM Handle.
2530 * @param pVCpu VMCPU Handle.
2531 */
2532REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2533{
2534 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2535 unsigned i;
2536 Assert(pCtx);
2537
2538 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2539 Log2(("REMR3StateBack:\n"));
2540 Assert(pVM->rem.s.fInREM);
2541
2542 /*
2543 * Copy back the registers.
2544 * This is done in the order they are declared in the CPUMCTX structure.
2545 */
2546
2547 /** @todo FOP */
2548 /** @todo FPUIP */
2549 /** @todo CS */
2550 /** @todo FPUDP */
2551 /** @todo DS */
2552
2553 /** @todo check if FPU/XMM was actually used in the recompiler */
2554 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87);
2555//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2556
2557#ifdef TARGET_X86_64
2558 /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and are undefined after a mode change. */
2559 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2560 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2561 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2562 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2563 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2564 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2565 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2566 pCtx->r8 = pVM->rem.s.Env.regs[8];
2567 pCtx->r9 = pVM->rem.s.Env.regs[9];
2568 pCtx->r10 = pVM->rem.s.Env.regs[10];
2569 pCtx->r11 = pVM->rem.s.Env.regs[11];
2570 pCtx->r12 = pVM->rem.s.Env.regs[12];
2571 pCtx->r13 = pVM->rem.s.Env.regs[13];
2572 pCtx->r14 = pVM->rem.s.Env.regs[14];
2573 pCtx->r15 = pVM->rem.s.Env.regs[15];
2574
2575 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2576
2577#else
2578 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2579 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2580 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2581 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2582 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2583 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2584 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2585
2586 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2587#endif
2588
2589#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2590 do \
2591 { \
2592 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2593 if (!pVM->rem.s.Env.segs[R_##a_SREG].newselector) \
2594 { \
2595 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2596 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2597 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2598 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2599 /* Note! QEmu saves the 2nd dword of the descriptor; we (VT-x/AMD-V) keep only the attributes! */ \
2600 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK; \
2601 } \
2602 else \
2603 { \
2604 pCtx->a_sreg.fFlags = 0; \
2605 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2606 } \
2607 } while (0)
2608
2609 SYNC_BACK_SREG(es, ES);
2610 SYNC_BACK_SREG(cs, CS);
2611 SYNC_BACK_SREG(ss, SS);
2612 SYNC_BACK_SREG(ds, DS);
2613 SYNC_BACK_SREG(fs, FS);
2614 SYNC_BACK_SREG(gs, GS);
2615
2616#ifdef TARGET_X86_64
2617 pCtx->rip = pVM->rem.s.Env.eip;
2618 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2619#else
2620 pCtx->eip = pVM->rem.s.Env.eip;
2621 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2622#endif
2623
2624 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2625 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2626 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2627#ifdef VBOX_WITH_RAW_MODE
2628 if (((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
2629 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2630#endif
2631 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2632
2633 for (i = 0; i < 8; i++)
2634 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2635
2636 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2637 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2638 {
2639 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2640 STAM_COUNTER_INC(&gStatREMGDTChange);
2641#ifdef VBOX_WITH_RAW_MODE
2642 if (!HMIsEnabled(pVM))
2643 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2644#endif
2645 }
2646
2647 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2648 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2649 {
2650 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2651 STAM_COUNTER_INC(&gStatREMIDTChange);
2652#ifdef VBOX_WITH_RAW_MODE
2653 if (!HMIsEnabled(pVM))
2654 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2655#endif
2656 }
2657
2658 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2659 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2660 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2661 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2662 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2663 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2664 )
2665 {
2666 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2667 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2668 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2669 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2670 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2671 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2672 STAM_COUNTER_INC(&gStatREMLDTRChange);
2673#ifdef VBOX_WITH_RAW_MODE
2674 if (!HMIsEnabled(pVM))
2675 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2676#endif
2677 }
2678
2679 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2680 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2681 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2682 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2683 /* Qemu and AMD/Intel have different ideas about the busy flag ... */ /** @todo just fix qemu! */
2684 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2685 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT
2686 : 0)
2687 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2688 )
2689 {
2690 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2691 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2692 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2693 (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2694 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT : 0));
2695 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2696 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2697 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2698 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2699 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2700 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2701 if (pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE)
2702 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> SEL_FLAGS_SHIFT;
2703 STAM_COUNTER_INC(&gStatREMTRChange);
2704#ifdef VBOX_WITH_RAW_MODE
2705 if (!HMIsEnabled(pVM))
2706 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2707#endif
2708 }
2709
2710 /* Sysenter MSR */
2711 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2712 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2713 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2714
2715 /* System MSRs. */
2716 pCtx->msrEFER = pVM->rem.s.Env.efer;
2717 pCtx->msrSTAR = pVM->rem.s.Env.star;
2718 pCtx->msrPAT = pVM->rem.s.Env.pat;
2719#ifdef TARGET_X86_64
2720 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2721 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2722 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2723 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2724#endif
2725
2726 /* Inhibit interrupt flag. */
2727 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2728 {
2729 Log(("Settings VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2730 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2731 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2732 }
2733 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2734 {
2735 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2736 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2737 }
2738
2739 /* Inhibit NMI flag. */
2740 if (pVM->rem.s.Env.hflags2 & HF2_NMI_MASK)
2741 {
2742 Log(("Settings VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2743 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2744 }
2745 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2746 {
2747 Log(("Clearing VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2748 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2749 }
2750
2751 remR3TrapClear(pVM);
2752
2753 /*
2754 * Check for traps.
2755 */
2756 if ( pVM->rem.s.Env.exception_index >= 0
2757 && pVM->rem.s.Env.exception_index < 256)
2758 {
2759 /* This cannot be a hardware-interrupt because exception_index < EXCP_INTERRUPT. */
2760 int rc;
2761
2762 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2763 TRPMEVENT enmType = pVM->rem.s.Env.exception_is_int ? TRPM_SOFTWARE_INT : TRPM_TRAP;
2764 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, enmType);
2765 AssertRC(rc);
2766 if (enmType == TRPM_TRAP)
2767 {
2768 switch (pVM->rem.s.Env.exception_index)
2769 {
2770 case X86_XCPT_PF:
2771 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2772 /* fallthru */
2773 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2774 case X86_XCPT_AC: case X86_XCPT_DF: /* 0 */
2775 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2776 break;
2777 }
2778 }
2779 }
2780
2781 /*
2782 * We're no longer in REM mode.
2783 */
2784 CPUMR3RemLeave(pVCpu,
2785 HMIsEnabled(pVM)
2786 || ( pVM->rem.s.Env.segs[R_SS].newselector
2787 | pVM->rem.s.Env.segs[R_GS].newselector
2788 | pVM->rem.s.Env.segs[R_FS].newselector
2789 | pVM->rem.s.Env.segs[R_ES].newselector
2790 | pVM->rem.s.Env.segs[R_DS].newselector
2791 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2792 );
2793 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2794 pVM->rem.s.fInREM = false;
2795 pVM->rem.s.pCtx = NULL;
2796 pVM->rem.s.Env.pVCpu = NULL;
2797 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2798 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2799 return VINF_SUCCESS;
2800}
2801
2802
2803/**
2804 * This is called by the disassembler when it wants to update the CPU state
2805 * before, for instance, doing a register dump.
2806 */
2807static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2808{
2809 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2810 unsigned i;
2811
2812 Assert(pVM->rem.s.fInREM);
2813
2814 /*
2815 * Copy back the registers.
2816 * This is done in the order they are declared in the CPUMCTX structure.
2817 */
2818
2819 PX86FXSTATE pFpuCtx = &pCtx->pXStateR3->x87;
2820 /** @todo FOP */
2821 /** @todo FPUIP */
2822 /** @todo CS */
2823 /** @todo FPUDP */
2824 /** @todo DS */
2825 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2826 pFpuCtx->MXCSR = 0;
2827 pFpuCtx->MXCSR_MASK = 0;
2828
2829 /** @todo check if FPU/XMM was actually used in the recompiler */
2830 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)pFpuCtx);
2831//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2832
2833#ifdef TARGET_X86_64
2834 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2835 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2836 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2837 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2838 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2839 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2840 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2841 pCtx->r8 = pVM->rem.s.Env.regs[8];
2842 pCtx->r9 = pVM->rem.s.Env.regs[9];
2843 pCtx->r10 = pVM->rem.s.Env.regs[10];
2844 pCtx->r11 = pVM->rem.s.Env.regs[11];
2845 pCtx->r12 = pVM->rem.s.Env.regs[12];
2846 pCtx->r13 = pVM->rem.s.Env.regs[13];
2847 pCtx->r14 = pVM->rem.s.Env.regs[14];
2848 pCtx->r15 = pVM->rem.s.Env.regs[15];
2849
2850 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2851#else
2852 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2853 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2854 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2855 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2856 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2857 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2858 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2859
2860 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2861#endif
2862
2863 SYNC_BACK_SREG(es, ES);
2864 SYNC_BACK_SREG(cs, CS);
2865 SYNC_BACK_SREG(ss, SS);
2866 SYNC_BACK_SREG(ds, DS);
2867 SYNC_BACK_SREG(fs, FS);
2868 SYNC_BACK_SREG(gs, GS);
2869
2870#ifdef TARGET_X86_64
2871 pCtx->rip = pVM->rem.s.Env.eip;
2872 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2873#else
2874 pCtx->eip = pVM->rem.s.Env.eip;
2875 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2876#endif
2877
2878 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2879 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2880 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2881#ifdef VBOX_WITH_RAW_MODE
2882 if (((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
2883 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2884#endif
2885 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2886
2887 for (i = 0; i < 8; i++)
2888 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2889
2890 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2891 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2892 {
2893 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2894 STAM_COUNTER_INC(&gStatREMGDTChange);
2895#ifdef VBOX_WITH_RAW_MODE
2896 if (!HMIsEnabled(pVM))
2897 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2898#endif
2899 }
2900
2901 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2902 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2903 {
2904 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2905 STAM_COUNTER_INC(&gStatREMIDTChange);
2906#ifdef VBOX_WITH_RAW_MODE
2907 if (!HMIsEnabled(pVM))
2908 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2909#endif
2910 }
2911
2912 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2913 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2914 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2915 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2916 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2917 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2918 )
2919 {
2920 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2921 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2922 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2923 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2924 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2925 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2926 STAM_COUNTER_INC(&gStatREMLDTRChange);
2927#ifdef VBOX_WITH_RAW_MODE
2928 if (!HMIsEnabled(pVM))
2929 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2930#endif
2931 }
2932
2933 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2934 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2935 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2936 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2937 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2938 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2939 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT
2940 : 0)
2941 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2942 )
2943 {
2944 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2945 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2946 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2947 (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2948 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT : 0));
2949 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2950 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2951 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2952 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2953 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2954 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2955 if (pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE)
2956 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> SEL_FLAGS_SHIFT;
2957 STAM_COUNTER_INC(&gStatREMTRChange);
2958#ifdef VBOX_WITH_RAW_MODE
2959 if (!HMIsEnabled(pVM))
2960 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2961#endif
2962 }
2963
2964 /* Sysenter MSR */
2965 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2966 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2967 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2968
2969 /* System MSRs. */
2970 pCtx->msrEFER = pVM->rem.s.Env.efer;
2971 pCtx->msrSTAR = pVM->rem.s.Env.star;
2972 pCtx->msrPAT = pVM->rem.s.Env.pat;
2973#ifdef TARGET_X86_64
2974 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2975 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2976 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2977 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2978#endif
2979
2980}
2981
2982
2983/**
2984 * Update the VMM state information if we're currently in REM.
2985 *
2986 * This method is used by DBGF and PDM devices when there is any uncertainty about whether
2987 * we're currently executing in REM and the VMM state is invalid. This method will of
2988 * course check that we're executing in REM before syncing any data over to the VMM.
2989 *
2990 * @param pVM The VM handle.
2991 * @param pVCpu The VMCPU handle.
2992 */
2993REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2994{
2995 if (pVM->rem.s.fInREM)
2996 remR3StateUpdate(pVM, pVCpu);
2997}
2998
2999
3000#undef LOG_GROUP
3001#define LOG_GROUP LOG_GROUP_REM
3002
3003
3004/**
3005 * Notify the recompiler about Address Gate 20 state change.
3006 *
3007 * This notification is required since A20 gate changes are
3008 * initiated from a device driver and the VM might just as
3009 * well be in REM mode as in RAW mode.
3010 *
3011 * @param pVM VM handle.
3012 * @param pVCpu VMCPU handle.
3013 * @param fEnable True if the gate should be enabled.
3014 * False if the gate should be disabled.
3015 */
3016REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
3017{
3018 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
3019 VM_ASSERT_EMT(pVM);
3020
3021 /** @todo SMP and the A20 gate... */
3022 if (pVM->rem.s.Env.pVCpu == pVCpu)
3023 {
3024 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3025 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
3026 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3027 }
3028}
3029
3030
3031/**
3032 * Replays the handler notification changes.
3033 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
3034 *
3035 * @param pVM VM handle.
3036 */
3037REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
3038{
3039 /*
3040 * Replay the flushes.
3041 */
3042 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
3043 VM_ASSERT_EMT(pVM);
3044
3045 /** @todo this isn't ensuring correct replay order. */
3046 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3047 {
3048 uint32_t idxNext;
3049 uint32_t idxRevHead;
3050 uint32_t idxHead;
3051#ifdef VBOX_STRICT
3052 int32_t c = 0;
3053#endif
3054
3055 /* Lockless purging of pending notifications. */
3056 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3057 if (idxHead == UINT32_MAX)
3058 return;
3059 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3060
3061 /*
3062 * Reverse the list to process it in FIFO order.
3063 */
3064 idxRevHead = UINT32_MAX;
3065 do
3066 {
3067 /* Save the index of the next rec. */
3068 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3069 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3070 /* Push the record onto the reversed list. */
3071 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3072 idxRevHead = idxHead;
3073 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3074 /* Advance. */
3075 idxHead = idxNext;
3076 } while (idxHead != UINT32_MAX);
3077
3078 /*
3079 * Loop thru the list, reinserting the records into the free list as they are
3080 * processed, to avoid having other EMTs running out of entries while we're flushing.
3081 */
3082 idxHead = idxRevHead;
3083 do
3084 {
3085 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3086 uint32_t idxCur;
3087 Assert(--c >= 0);
3088
3089 switch (pCur->enmKind)
3090 {
3091 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3092 remR3NotifyHandlerPhysicalRegister(pVM,
3093 pCur->u.PhysicalRegister.enmKind,
3094 pCur->u.PhysicalRegister.GCPhys,
3095 pCur->u.PhysicalRegister.cb,
3096 pCur->u.PhysicalRegister.fHasHCHandler);
3097 break;
3098
3099 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3100 remR3NotifyHandlerPhysicalDeregister(pVM,
3101 pCur->u.PhysicalDeregister.enmKind,
3102 pCur->u.PhysicalDeregister.GCPhys,
3103 pCur->u.PhysicalDeregister.cb,
3104 pCur->u.PhysicalDeregister.fHasHCHandler,
3105 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3106 break;
3107
3108 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3109 remR3NotifyHandlerPhysicalModify(pVM,
3110 pCur->u.PhysicalModify.enmKind,
3111 pCur->u.PhysicalModify.GCPhysOld,
3112 pCur->u.PhysicalModify.GCPhysNew,
3113 pCur->u.PhysicalModify.cb,
3114 pCur->u.PhysicalModify.fHasHCHandler,
3115 pCur->u.PhysicalModify.fRestoreAsRAM);
3116 break;
3117
3118 default:
3119 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3120 break;
3121 }
3122
3123 /*
3124 * Advance idxHead.
3125 */
3126 idxCur = idxHead;
3127 idxHead = pCur->idxNext;
3128 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3129
3130 /*
3131 * Put the record back into the free list.
3132 */
3133 do
3134 {
3135 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3136 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3137 ASMCompilerBarrier();
3138 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3139 } while (idxHead != UINT32_MAX);
3140
3141#ifdef VBOX_STRICT
3142 if (pVM->cCpus == 1)
3143 {
3144 unsigned c;
3145 /* Check that all records are now on the free list. */
3146 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3147 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3148 c++;
3149 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3150 }
3151#endif
3152 }
3153}
3154
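/*
 * Illustrative sketch (not part of the build): the producer side that pairs
 * with the lockless consumer above.  A record taken off idxFreeList would be
 * pushed onto idxPendingList with the same compare-and-swap pattern used for
 * the free list; the helper name and signature are hypothetical.
 *
 * @code
 * static void remR3ExampleQueueNotification(PVM pVM, uint32_t idxRec)
 * {
 *     PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[idxRec];
 *     uint32_t idxNext;
 *     do
 *     {
 *         idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxPendingList);
 *         ASMAtomicWriteU32(&pRec->idxNext, idxNext);
 *         ASMCompilerBarrier();
 *     } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxPendingList, idxRec, idxNext));
 *     VM_FF_SET(pVM, VM_FF_REM_HANDLER_NOTIFY);
 * }
 * @endcode
 */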
3155
3156/**
3157 * Notify REM about changed code page.
3158 *
3159 * @returns VBox status code.
3160 * @param pVM VM handle.
3161 * @param pVCpu VMCPU handle.
3162 * @param pvCodePage Code page address
3163 */
3164REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3165{
3166#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3167 int rc;
3168 RTGCPHYS PhysGC;
3169 uint64_t flags;
3170
3171 VM_ASSERT_EMT(pVM);
3172
3173 /*
3174 * Get the physical page address.
3175 */
3176 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3177 if (rc == VINF_SUCCESS)
3178 {
3179 /*
3180 * Sync the required registers and flush the whole page.
3181 * (Easier to do the whole page than notifying it about each physical
3182 * byte that was changed.
3183 */
3184 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3185 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3186 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3187 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3188
3189 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3190 }
3191#endif
3192 return VINF_SUCCESS;
3193}
3194
3195
3196/**
3197 * Notification about a successful MMR3PhysRegister() call.
3198 *
3199 * @param pVM VM handle.
3200 * @param GCPhys The physical address of the RAM.
3201 * @param cb Size of the memory.
3202 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3203 */
3204REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3205{
3206 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3207 VM_ASSERT_EMT(pVM);
3208
3209 /*
3210 * Validate input - we trust the caller.
3211 */
3212 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3213 Assert(cb);
3214 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3215 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("%#x\n", fFlags));
3216
3217 /*
3218 * Base ram? Update GCPhysLastRam.
3219 */
3220 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3221 {
3222 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3223 {
3224 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3225 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3226 }
3227 }
3228
3229 /*
3230 * Register the ram.
3231 */
3232 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3233
3234 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3235 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3236 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3237
3238 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3239}
3240
3241
3242/**
3243 * Notification about a successful MMR3PhysRomRegister() call.
3244 *
3245 * @param pVM VM handle.
3246 * @param GCPhys The physical address of the ROM.
3247 * @param cb The size of the ROM.
3248 * @param pvCopy Pointer to the ROM copy.
3249 * @param fShadow Whether it's currently writable shadow ROM or normal read-only ROM.
3250 * This function will be called whenever the protection of the
3251 * shadow ROM changes (at reset and end of POST).
3252 */
3253REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3254{
3255 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3256 VM_ASSERT_EMT(pVM);
3257
3258 /*
3259 * Validate input - we trust the caller.
3260 */
3261 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3262 Assert(cb);
3263 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3264
3265 /*
3266 * Register the rom.
3267 */
3268 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3269
3270 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3271 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3272 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3273
3274 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3275}
3276
3277
3278/**
3279 * Notification about a successful memory deregistration or reservation.
3280 *
3281 * @param pVM VM Handle.
3282 * @param GCPhys Start physical address.
3283 * @param cb The size of the range.
3284 */
3285REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3286{
3287 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3288 VM_ASSERT_EMT(pVM);
3289
3290 /*
3291 * Validate input - we trust the caller.
3292 */
3293 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3294 Assert(cb);
3295 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3296
3297 /*
3298 * Unassign the memory.
3299 */
3300 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3301
3302 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3303 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3304 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3305
3306 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3307}
3308
3309
3310/**
3311 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3312 *
3313 * @param pVM VM Handle.
3314 * @param enmKind Kind of access handler.
3315 * @param GCPhys Handler range address.
3316 * @param cb Size of the handler range.
3317 * @param fHasHCHandler Set if the handler has a HC callback function.
3318 *
3319 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3320 * Handler memory type to memory which has no HC handler.
3321 */
3322static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3323 bool fHasHCHandler)
3324{
3325 Log(("REMR3NotifyHandlerPhysicalRegister: enmKind=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3326 enmKind, GCPhys, cb, fHasHCHandler));
3327
3328 VM_ASSERT_EMT(pVM);
3329 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3330 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3331
3332
3333 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3334
3335 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3336 if (enmKind == PGMPHYSHANDLERKIND_MMIO)
3337 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3338 else if (fHasHCHandler)
3339 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3340 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3341
3342 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3343}
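
/* Illustrative summary (not built) of the memory type selection above:
 *   PGMPHYSHANDLERKIND_MMIO        -> iMMIOMemType
 *   other kind with a HC handler   -> iHandlerMemType
 *   other kind, no HC handler      -> left untouched (see @remark above)
 */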
3344
3345/**
3346 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3347 *
3348 * @param pVM VM Handle.
3349 * @param enmKind Kind of access handler.
3350 * @param GCPhys Handler range address.
3351 * @param cb Size of the handler range.
3352 * @param fHasHCHandler Set if the handler has a HC callback function.
3353 *
3354 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3355 * Handler memory type to memory which has no HC handler.
3356 */
3357REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3358 bool fHasHCHandler)
3359{
3360 REMR3ReplayHandlerNotifications(pVM);
3361
3362 remR3NotifyHandlerPhysicalRegister(pVM, enmKind, GCPhys, cb, fHasHCHandler);
3363}
3364
3365/**
3366 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3367 *
3368 * @param pVM VM Handle.
3369 * @param enmKind Kind of access handler.
3370 * @param GCPhys Handler range address.
3371 * @param cb Size of the handler range.
3372 * @param fHasHCHandler Set if the handler has a HC callback function.
3373 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3374 */
3375static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3376 bool fHasHCHandler, bool fRestoreAsRAM)
3377{
3378 Log(("REMR3NotifyHandlerPhysicalDeregister: enmKind=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3379 enmKind, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3380 VM_ASSERT_EMT(pVM);
3381
3382
3383 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3384
3385 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3386 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3387 if (enmKind == PGMPHYSHANDLERKIND_MMIO)
3388 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3389 else if (fHasHCHandler)
3390 {
3391 if (!fRestoreAsRAM)
3392 {
3393 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3394 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3395 }
3396 else
3397 {
3398 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3399 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3400 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3401 }
3402 }
3403 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3404
3405 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3406}
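
/* Illustrative summary (not built) of the restore decision above:
 *   MMIO handler                    -> IO_MEM_UNASSIGNED (see @todo above)
 *   HC handler and !fRestoreAsRAM   -> IO_MEM_UNASSIGNED (beyond end of RAM)
 *   HC handler and fRestoreAsRAM    -> plain RAM again (identity GCPhys)
 */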
3407
3408/**
3409 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3410 *
3411 * @param pVM VM Handle.
3412 * @param enmKind Kind of access handler.
3413 * @param GCPhys Handler range address.
3414 * @param cb Size of the handler range.
3415 * @param fHasHCHandler Set if the handler has a HC callback function.
3416 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3417 */
3418REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3419{
3420 REMR3ReplayHandlerNotifications(pVM);
3421 remR3NotifyHandlerPhysicalDeregister(pVM, enmKind, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3422}
3423
3424
3425/**
3426 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3427 *
3428 * @param pVM VM Handle.
3429 * @param enmKind Kind of access handler.
3430 * @param GCPhysOld Old handler range address.
3431 * @param GCPhysNew New handler range address.
3432 * @param cb Size of the handler range.
3433 * @param fHasHCHandler Set if the handler has a HC callback function.
3434 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3435 */
3436static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3437{
3438 Log(("REMR3NotifyHandlerPhysicalModify: enmKind=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3439 enmKind, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3440 VM_ASSERT_EMT(pVM);
3441 AssertReleaseMsg(enmKind != PGMPHYSHANDLERKIND_MMIO, ("enmKind=%d\n", enmKind));
3442
3443 if (fHasHCHandler)
3444 {
3445 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3446
3447 /*
3448 * Reset the old page.
3449 */
3450 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3451 if (!fRestoreAsRAM)
3452 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3453 else
3454 {
3455 /* This is not perfect, but it'll do for PD monitoring... */
3456 Assert(cb == PAGE_SIZE);
3457 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3458 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3459 }
3460
3461 /*
3462 * Update the new page.
3463 */
3464 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3465 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3466 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3467 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3468
3469 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3470 }
3471}
3472
3473/**
3474 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3475 *
3476 * @param pVM VM Handle.
3477 * @param enmKind Kind of access handler.
3478 * @param GCPhysOld Old handler range address.
3479 * @param GCPhysNew New handler range address.
3480 * @param cb Size of the handler range.
3481 * @param fHasHCHandler Set if the handler has a HC callback function.
3482 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3483 */
3484REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3485{
3486 REMR3ReplayHandlerNotifications(pVM);
3487
3488 remR3NotifyHandlerPhysicalModify(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3489}
3490
3491/**
3492 * Checks if we're handling access to this page or not.
3493 *
3494 * @returns true if we're trapping access.
3495 * @returns false if we aren't.
3496 * @param pVM The VM handle.
3497 * @param GCPhys The physical address.
3498 *
3499 * @remark This function will only work correctly in VBOX_STRICT builds!
3500 */
3501REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3502{
3503#ifdef VBOX_STRICT
3504 ram_addr_t off;
3505 REMR3ReplayHandlerNotifications(pVM);
3506
3507 off = get_phys_page_offset(GCPhys);
3508 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3509 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3510 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3511#else
3512 return false;
3513#endif
3514}
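
/* Minimal usage sketch (hypothetical caller, not built): since the function
 * only returns meaningful results in VBOX_STRICT builds, it is suited to
 * assertions rather than program logic. */
#if 0
# ifdef VBOX_STRICT
    Assert(REMR3IsPageAccessHandled(pVM, GCPhys)); /* expect the page to be trapped */
# endif
#endif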
3515
3516
3517/**
3518 * Deals with a rare case in get_phys_addr_code where the code
3519 * is being monitored.
3520 *
3521 * It could also be an MMIO page, in which case we will raise a fatal error.
3522 *
3523 * @returns The physical address corresponding to addr.
3524 * @param env The cpu environment.
3525 * @param addr The virtual address.
3526 * @param pTLBEntry The TLB entry.
3527 */
3528target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3529 target_ulong addr,
3530 CPUTLBEntry *pTLBEntry,
3531 target_phys_addr_t ioTLBEntry)
3532{
3533 PVM pVM = env->pVM;
3534
3535 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3536 {
3537 /* If code memory is being monitored, the matching IOTLB entry will have
3538 the handler IO type and its addend yields the real physical address,
3539 whether or not we store the VA in the TLB, as handlers are always passed a PA. */
3540 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3541 return ret;
3542 }
3543 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3544 "*** handlers\n",
3545 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3546 DBGFR3Info(pVM->pUVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3547 LogRel(("*** mmio\n"));
3548 DBGFR3Info(pVM->pUVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3549 LogRel(("*** phys\n"));
3550 DBGFR3Info(pVM->pUVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3551 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3552 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3553 AssertFatalFailed();
3554}
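
/* Illustrative sketch (not built, hypothetical helper name): the low bits of
 * an IOTLB entry hold the qemu memory type while the page aligned part acts
 * as an addend, so the physical address is recovered exactly as in the
 * monitored-code path above. */
#if 0
static target_ulong remR3SketchIoTlbToPhys(target_phys_addr_t ioTLBEntry, target_ulong addr)
{
    return (ioTLBEntry & TARGET_PAGE_MASK) + addr; /* addend + VA -> PA */
}
#endif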
3555
3556/**
3557 * Read guest RAM and ROM.
3558 *
3559 * @param SrcGCPhys The source address (guest physical).
3560 * @param pvDst The destination address.
3561 * @param cb Number of bytes to read.
3562 */
3563void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3564{
3565 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3566 VBOX_CHECK_ADDR(SrcGCPhys);
3567 VBOXSTRICTRC rcStrict = PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb, PGMACCESSORIGIN_REM);
3568 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3569#ifdef VBOX_DEBUG_PHYS
3570 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3571#endif
3572 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3573}
3574
3575
3576/**
3577 * Read guest RAM and ROM, unsigned 8-bit.
3578 *
3579 * @param SrcGCPhys The source address (guest physical).
3580 */
3581RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3582{
3583 uint8_t val;
3584 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3585 VBOX_CHECK_ADDR(SrcGCPhys);
3586 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3587 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3588#ifdef VBOX_DEBUG_PHYS
3589 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3590#endif
3591 return val;
3592}
3593
3594
3595/**
3596 * Read guest RAM and ROM, signed 8-bit.
3597 *
3598 * @param SrcGCPhys The source address (guest physical).
3599 */
3600RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3601{
3602 int8_t val;
3603 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3604 VBOX_CHECK_ADDR(SrcGCPhys);
3605 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3606 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3607#ifdef VBOX_DEBUG_PHYS
3608 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3609#endif
3610 return val;
3611}
3612
3613
3614/**
3615 * Read guest RAM and ROM, unsigned 16-bit.
3616 *
3617 * @param SrcGCPhys The source address (guest physical).
3618 */
3619RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3620{
3621 uint16_t val;
3622 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3623 VBOX_CHECK_ADDR(SrcGCPhys);
3624 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3625 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3626#ifdef VBOX_DEBUG_PHYS
3627 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3628#endif
3629 return val;
3630}
3631
3632
3633/**
3634 * Read guest RAM and ROM, signed 16-bit.
3635 *
3636 * @param SrcGCPhys The source address (guest physical).
3637 */
3638RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3639{
3640 int16_t val;
3641 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3642 VBOX_CHECK_ADDR(SrcGCPhys);
3643 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3644 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3645#ifdef VBOX_DEBUG_PHYS
3646 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3647#endif
3648 return val;
3649}
3650
3651
3652/**
3653 * Read guest RAM and ROM, unsigned 32-bit.
3654 *
3655 * @param SrcGCPhys The source address (guest physical).
3656 */
3657RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3658{
3659 uint32_t val;
3660 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3661 VBOX_CHECK_ADDR(SrcGCPhys);
3662 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3663 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3664#ifdef VBOX_DEBUG_PHYS
3665 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3666#endif
3667 return val;
3668}
3669
3670
3671/**
3672 * Read guest RAM and ROM, signed 32-bit.
3673 *
3674 * @param SrcGCPhys The source address (guest physical).
3675 */
3676RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3677{
3678 int32_t val;
3679 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3680 VBOX_CHECK_ADDR(SrcGCPhys);
3681 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3682 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3683#ifdef VBOX_DEBUG_PHYS
3684 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3685#endif
3686 return val;
3687}
3688
3689
3690/**
3691 * Read guest RAM and ROM, unsigned 64-bit.
3692 *
3693 * @param SrcGCPhys The source address (guest physical).
3694 */
3695uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3696{
3697 uint64_t val;
3698 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3699 VBOX_CHECK_ADDR(SrcGCPhys);
3700 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3701 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3702#ifdef VBOX_DEBUG_PHYS
3703 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3704#endif
3705 return val;
3706}
3707
3708
3709/**
3710 * Read guest RAM and ROM, signed 64-bit.
3711 *
3712 * @param SrcGCPhys The source address (guest physical).
3713 */
3714int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3715{
3716 int64_t val;
3717 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3718 VBOX_CHECK_ADDR(SrcGCPhys);
3719 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3720 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3721#ifdef VBOX_DEBUG_PHYS
3722 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3723#endif
3724 return val;
3725}
3726
3727
3728/**
3729 * Write guest RAM.
3730 *
3731 * @param DstGCPhys The destination address (guest physical).
3732 * @param pvSrc The source address.
3733 * @param cb Number of bytes to write.
3734 */
3735void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3736{
3737 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3738 VBOX_CHECK_ADDR(DstGCPhys);
3739 VBOXSTRICTRC rcStrict = PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb, PGMACCESSORIGIN_REM);
3740 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3741 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3742#ifdef VBOX_DEBUG_PHYS
3743 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3744#endif
3745}
3746
3747
3748/**
3749 * Write guest RAM, unsigned 8-bit.
3750 *
3751 * @param DstGCPhys The destination address (guest physical).
3752 * @param val Value
3753 */
3754void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3755{
3756 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3757 VBOX_CHECK_ADDR(DstGCPhys);
3758 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3759 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3760#ifdef VBOX_DEBUG_PHYS
3761 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3762#endif
3763}
3764
3765
3766/**
3767 * Write guest RAM, unsigned 16-bit.
3768 *
3769 * @param DstGCPhys The destination address (guest physical).
3770 * @param val Value
3771 */
3772void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3773{
3774 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3775 VBOX_CHECK_ADDR(DstGCPhys);
3776 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3777 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3778#ifdef VBOX_DEBUG_PHYS
3779 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3780#endif
3781}
3782
3783
3784/**
3785 * Write guest RAM, unsigned 32-bit.
3786 *
3787 * @param DstGCPhys The destination address (guest physical).
3788 * @param val Value
3789 */
3790void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3791{
3792 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3793 VBOX_CHECK_ADDR(DstGCPhys);
3794 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3795 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3796#ifdef VBOX_DEBUG_PHYS
3797 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3798#endif
3799}
3800
3801
3802/**
3803 * Write guest RAM, unsigned 64-bit.
3804 *
3805 * @param DstGCPhys The destination address (guest physical).
3806 * @param val Value
3807 */
3808void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3809{
3810 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3811 VBOX_CHECK_ADDR(DstGCPhys);
3812 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3813 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3814#ifdef VBOX_DEBUG_PHYS
3815 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3816#endif
3817}
3818
3819#undef LOG_GROUP
3820#define LOG_GROUP LOG_GROUP_REM_MMIO
3821
3822/** Read MMIO memory. */
3823static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys)
3824{
3825 CPUX86State *env = (CPUX86State *)pvEnv;
3826 uint32_t u32 = 0;
3827 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 1);
3828 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3829 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3830 return u32;
3831}
3832
3833/** Read MMIO memory. */
3834static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys)
3835{
3836 CPUX86State *env = (CPUX86State *)pvEnv;
3837 uint32_t u32 = 0;
3838 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 2);
3839 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3840 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3841 return u32;
3842}
3843
3844/** Read MMIO memory. */
3845static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys)
3846{
3847 CPUX86State *env = (CPUX86State *)pvEnv;
3848 uint32_t u32 = 0;
3849 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 4);
3850 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3851 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3852 return u32;
3853}
3854
3855/** Write to MMIO memory. */
3856static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3857{
3858 CPUX86State *env = (CPUX86State *)pvEnv;
3859 int rc;
3860 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3861 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 1);
3862 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3863}
3864
3865/** Write to MMIO memory. */
3866static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3867{
3868 CPUX86State *env = (CPUX86State *)pvEnv;
3869 int rc;
3870 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3871 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 2);
3872 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3873}
3874
3875/** Write to MMIO memory. */
3876static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3877{
3878 CPUX86State *env = (CPUX86State *)pvEnv;
3879 int rc;
3880 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3881 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 4);
3882 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3883}
3884
3885
3886#undef LOG_GROUP
3887#define LOG_GROUP LOG_GROUP_REM_HANDLER
3888
3889/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3890
3891static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3892{
3893 uint8_t u8;
3894 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3895 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8), PGMACCESSORIGIN_REM);
3896 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3897 return u8;
3898}
3899
3900static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3901{
3902 uint16_t u16;
3903 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3904 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16), PGMACCESSORIGIN_REM);
3905 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3906 return u16;
3907}
3908
3909static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3910{
3911 uint32_t u32;
3912 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3913 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32), PGMACCESSORIGIN_REM);
3914 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3915 return u32;
3916}
3917
3918static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3919{
3920 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3921 VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t), PGMACCESSORIGIN_REM);
3922 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3923}
3924
3925static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3926{
3927 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3928 VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t), PGMACCESSORIGIN_REM);
3929 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3930}
3931
3932static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3933{
3934 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3935 VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t), PGMACCESSORIGIN_REM);
3936 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3937}
3938
3939/* -+- disassembly -+- */
3940
3941#undef LOG_GROUP
3942#define LOG_GROUP LOG_GROUP_REM_DISAS
3943
3944
3945/**
3946 * Enables or disables singled stepped disassembly.
3947 *
3948 * @returns VBox status code.
3949 * @param pVM VM handle.
3950 * @param fEnable To enable set this flag, to disable clear it.
3951 */
3952static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3953{
3954 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3955 VM_ASSERT_EMT(pVM);
3956
3957 if (fEnable)
3958 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3959 else
3960 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3961#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3962 cpu_single_step(&pVM->rem.s.Env, fEnable);
3963#endif
3964 return VINF_SUCCESS;
3965}
3966
3967
3968/**
3969 * Enables or disables singled stepped disassembly.
3970 *
3971 * @returns VBox status code.
3972 * @param pVM VM handle.
3973 * @param fEnable To enable set this flag, to disable clear it.
3974 */
3975REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3976{
3977 int rc;
3978
3979 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3980 if (VM_IS_EMT(pVM))
3981 return remR3DisasEnableStepping(pVM, fEnable);
3982
3983 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3984 AssertRC(rc);
3985 return rc;
3986}
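
/* Minimal usage sketch (hypothetical caller, not built): the wrapper can be
 * called from any thread; non-EMT callers are marshalled onto the EMT via a
 * priority request, as shown above. */
#if 0
    int rc = REMR3DisasEnableStepping(pVM, true /* fEnable */);
    AssertRC(rc);
#endif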
3987
3988
3989#ifdef VBOX_WITH_DEBUGGER
3990/**
3991 * External Debugger Command: .remstep [on|off|1|0]
3992 */
3993static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM,
3994 PCDBGCVAR paArgs, unsigned cArgs)
3995{
3996 int rc;
3997 PVM pVM = pUVM->pVM;
3998
3999 if (cArgs == 0)
4000 /*
4001 * Print the current status.
4002 */
4003 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
4004 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
4005 else
4006 {
4007 /*
4008 * Convert the argument and change the mode.
4009 */
4010 bool fEnable;
4011 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
4012 if (RT_SUCCESS(rc))
4013 {
4014 rc = REMR3DisasEnableStepping(pVM, fEnable);
4015 if (RT_SUCCESS(rc))
4016 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
4017 else
4018 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
4019 }
4020 else
4021 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
4022 }
4023 return rc;
4024}
4025#endif /* VBOX_WITH_DEBUGGER */
4026
4027
4028/**
4029 * Disassembles one instruction and prints it to the log.
4030 *
4031 * @returns Success indicator.
4032 * @param env Pointer to the recompiler CPU structure.
4033 * @param f32BitCode Indicates whether the code should be disassembled
4034 * as 16 or 32 bit code. If -1, the CS selector
4035 * will be inspected.
4036 * @param pszPrefix Optional prefix for the log output.
4037 */
4038bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
4039{
4040 PVM pVM = env->pVM;
4041 const bool fLog = LogIsEnabled();
4042 const bool fLog2 = LogIs2Enabled();
4043 int rc = VINF_SUCCESS;
4044
4045 /*
4046 * Don't bother if there ain't any log output to do.
4047 */
4048 if (!fLog && !fLog2)
4049 return true;
4050
4051 /*
4052 * Update the state so DBGF reads the correct register values.
4053 */
4054 remR3StateUpdate(pVM, env->pVCpu);
4055
4056 /*
4057 * Log registers if requested.
4058 */
4059 if (fLog2)
4060 DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
4061
4062 /*
4063 * Disassemble to log.
4064 */
4065 if (fLog)
4066 {
4067 PVMCPU pVCpu = VMMGetCpu(pVM);
4068 char szBuf[256];
4069 szBuf[0] = '\0';
4070 rc = DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM,
4071 pVCpu->idCpu,
4072 0, /* Sel */ 0, /* GCPtr */
4073 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4074 szBuf,
4075 sizeof(szBuf),
4076 NULL);
4077 if (RT_FAILURE(rc))
4078 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4079 if (pszPrefix && *pszPrefix)
4080 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4081 else
4082 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4083 }
4084
4085 return RT_SUCCESS(rc);
4086}
4087
4088
4089/**
4090 * Disassemble recompiled code.
4091 *
4092 * @param phFile Ignored, usually the logfile.
4093 * @param pvCode Pointer to the code block.
4094 * @param cb Size of the code block.
4095 */
4096void disas(FILE *phFile, void *pvCode, unsigned long cb)
4097{
4098 if (LogIs2Enabled())
4099 {
4100 unsigned off = 0;
4101 char szOutput[256];
4102 DISCPUSTATE Cpu;
4103#ifdef RT_ARCH_X86
4104 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
4105#else
4106 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
4107#endif
4108
4109 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4110 while (off < cb)
4111 {
4112 uint32_t cbInstr;
4113 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
4114 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
4115 if (RT_SUCCESS(rc))
4116 RTLogPrintf("%s", szOutput);
4117 else
4118 {
4119 RTLogPrintf("disas error %Rrc\n", rc);
4120 cbInstr = 1;
4121 }
4122 off += cbInstr;
4123 }
4124 }
4125}
4126
4127
4128/**
4129 * Disassemble guest code.
4130 *
4131 * @param phFile Ignored, usually the logfile.
4132 * @param uCode The guest address of the code to disassemble. (flat?)
4133 * @param cb Number of bytes to disassemble.
4134 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4135 */
4136void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4137{
4138 if (LogIs2Enabled())
4139 {
4140 PVM pVM = cpu_single_env->pVM;
4141 PVMCPU pVCpu = cpu_single_env->pVCpu;
4142 RTSEL cs;
4143 RTGCUINTPTR eip;
4144
4145 Assert(pVCpu);
4146
4147 /*
4148 * Update the state so DBGF reads the correct register values (flags).
4149 */
4150 remR3StateUpdate(pVM, pVCpu);
4151
4152 /*
4153 * Do the disassembling.
4154 */
4155 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4156 cs = cpu_single_env->segs[R_CS].selector;
4157 eip = uCode - cpu_single_env->segs[R_CS].base;
4158 for (;;)
4159 {
4160 char szBuf[256];
4161 uint32_t cbInstr;
4162 int rc = DBGFR3DisasInstrEx(pVM->pUVM,
4163 pVCpu->idCpu,
4164 cs,
4165 eip,
4166 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4167 szBuf, sizeof(szBuf),
4168 &cbInstr);
4169 if (RT_SUCCESS(rc))
4170 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4171 else
4172 {
4173 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4174 cbInstr = 1;
4175 }
4176
4177 /* next */
4178 if (cb <= cbInstr)
4179 break;
4180 cb -= cbInstr;
4181 uCode += cbInstr;
4182 eip += cbInstr;
4183 }
4184 }
4185}
4186
4187
4188/**
4189 * Looks up a guest symbol.
4190 *
4191 * @returns Pointer to symbol name. This is a static buffer.
4192 * @param orig_addr The address in question.
4193 */
4194const char *lookup_symbol(target_ulong orig_addr)
4195{
4196 PVM pVM = cpu_single_env->pVM;
4197 RTGCINTPTR off = 0;
4198 RTDBGSYMBOL Sym;
4199 DBGFADDRESS Addr;
4200
4201 int rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, orig_addr),
4202 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL, &off, &Sym, NULL /*phMod*/);
4203 if (RT_SUCCESS(rc))
4204 {
4205 static char szSym[sizeof(Sym.szName) + 48];
4206 if (!off)
4207 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4208 else if (off > 0)
4209 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4210 else
4211 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4212 return szSym;
4213 }
4214 return "<N/A>";
4215}
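
/* Minimal usage sketch (hypothetical, not built): resolving an address for a
 * log statement. The returned buffer is static, so the pointer must not be
 * cached across calls. */
#if 0
    Log(("pc %RGv is near %s", (RTGCPTR)uPc, lookup_symbol(uPc)));
#endif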
4216
4217
4218#undef LOG_GROUP
4219#define LOG_GROUP LOG_GROUP_REM
4220
4221
4222/* -+- FF notifications -+- */
4223
4224
4225/**
4226 * Notification about a pending interrupt.
4227 *
4228 * @param pVM VM Handle.
4229 * @param pVCpu VMCPU Handle.
4230 * @param u8Interrupt The interrupt number.
4231 * @thread The emulation thread.
4232 */
4233REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4234{
4235 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4236 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4237}
4238
4239/**
4240 * Queries the pending interrupt.
4241 *
4242 * @returns The pending interrupt or REM_NO_PENDING_IRQ.
4243 * @param pVM VM Handle.
4244 * @param pVCpu VMCPU Handle.
4245 * @thread The emulation thread.
4246 */
4247REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4248{
4249 return pVM->rem.s.u32PendingInterrupt;
4250}
4251
4252/**
4253 * Notification about the interrupt FF being set.
4254 *
4255 * @param pVM VM Handle.
4256 * @param pVCpu VMCPU Handle.
4257 * @thread The emulation thread.
4258 */
4259REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4260{
4261#ifndef IEM_VERIFICATION_MODE
4262 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4263 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4264 if (pVM->rem.s.fInREM)
4265 {
4266 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4267 CPU_INTERRUPT_EXTERNAL_HARD);
4268 }
4269#endif
4270}
4271
4272
4273/**
4274 * Notification about the interrupt FF being cleared.
4275 *
4276 * @param pVM VM Handle.
4277 * @param pVCpu VMCPU Handle.
4278 * @thread Any.
4279 */
4280REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4281{
4282 LogFlow(("REMR3NotifyInterruptClear:\n"));
4283 if (pVM->rem.s.fInREM)
4284 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4285}
4286
4287
4288/**
4289 * Notification about pending timer(s).
4290 *
4291 * @param pVM VM Handle.
4292 * @param pVCpuDst The target cpu for this notification.
4293 * TM will not broadcast pending timer events, but use
4294 * a dedicated EMT for them. So, only interrupt REM
4295 * execution if the given CPU is executing in REM.
4296 * @thread Any.
4297 */
4298REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4299{
4300#ifndef IEM_VERIFICATION_MODE
4301#ifndef DEBUG_bird
4302 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4303#endif
4304 if (pVM->rem.s.fInREM)
4305 {
4306 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4307 {
4308 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4309 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4310 CPU_INTERRUPT_EXTERNAL_TIMER);
4311 }
4312 else
4313 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4314 }
4315 else
4316 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4317#endif
4318}
4319
4320
4321/**
4322 * Notification about pending DMA transfers.
4323 *
4324 * @param pVM VM Handle.
4325 * @thread Any.
4326 */
4327REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4328{
4329#ifndef IEM_VERIFICATION_MODE
4330 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4331 if (pVM->rem.s.fInREM)
4332 {
4333 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4334 CPU_INTERRUPT_EXTERNAL_DMA);
4335 }
4336#endif
4337}
4338
4339
4340/**
4341 * Notification about pending queue(s).
4342 *
4343 * @param pVM VM Handle.
4344 * @thread Any.
4345 */
4346REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4347{
4348#ifndef IEM_VERIFICATION_MODE
4349 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4350 if (pVM->rem.s.fInREM)
4351 {
4352 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4353 CPU_INTERRUPT_EXTERNAL_EXIT);
4354 }
4355#endif
4356}
4357
4358
4359/**
4360 * Notification about pending FF set by an external thread.
4361 *
4362 * @param pVM VM handle.
4363 * @thread Any.
4364 */
4365REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4366{
4367#ifndef IEM_VERIFICATION_MODE
4368 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4369 if (pVM->rem.s.fInREM)
4370 {
4371 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4372 CPU_INTERRUPT_EXTERNAL_EXIT);
4373 }
4374#endif
4375}
4376
4377
4378#ifdef VBOX_WITH_STATISTICS
4379void remR3ProfileStart(int statcode)
4380{
4381 STAMPROFILEADV *pStat;
4382 switch(statcode)
4383 {
4384 case STATS_EMULATE_SINGLE_INSTR:
4385 pStat = &gStatExecuteSingleInstr;
4386 break;
4387 case STATS_QEMU_COMPILATION:
4388 pStat = &gStatCompilationQEmu;
4389 break;
4390 case STATS_QEMU_RUN_EMULATED_CODE:
4391 pStat = &gStatRunCodeQEmu;
4392 break;
4393 case STATS_QEMU_TOTAL:
4394 pStat = &gStatTotalTimeQEmu;
4395 break;
4396 case STATS_QEMU_RUN_TIMERS:
4397 pStat = &gStatTimers;
4398 break;
4399 case STATS_TLB_LOOKUP:
4400 pStat= &gStatTBLookup;
4401 break;
4402 case STATS_IRQ_HANDLING:
4403 pStat= &gStatIRQ;
4404 break;
4405 case STATS_RAW_CHECK:
4406 pStat = &gStatRawCheck;
4407 break;
4408
4409 default:
4410 AssertMsgFailed(("unknown stat %d\n", statcode));
4411 return;
4412 }
4413 STAM_PROFILE_ADV_START(pStat, a);
4414}
4415
4416
4417void remR3ProfileStop(int statcode)
4418{
4419 STAMPROFILEADV *pStat;
4420 switch(statcode)
4421 {
4422 case STATS_EMULATE_SINGLE_INSTR:
4423 pStat = &gStatExecuteSingleInstr;
4424 break;
4425 case STATS_QEMU_COMPILATION:
4426 pStat = &gStatCompilationQEmu;
4427 break;
4428 case STATS_QEMU_RUN_EMULATED_CODE:
4429 pStat = &gStatRunCodeQEmu;
4430 break;
4431 case STATS_QEMU_TOTAL:
4432 pStat = &gStatTotalTimeQEmu;
4433 break;
4434 case STATS_QEMU_RUN_TIMERS:
4435 pStat = &gStatTimers;
4436 break;
4437 case STATS_TLB_LOOKUP:
4438 pStat= &gStatTBLookup;
4439 break;
4440 case STATS_IRQ_HANDLING:
4441 pStat= &gStatIRQ;
4442 break;
4443 case STATS_RAW_CHECK:
4444 pStat = &gStatRawCheck;
4445 break;
4446 default:
4447 AssertMsgFailed(("unknown stat %d\n", statcode));
4448 return;
4449 }
4450 STAM_PROFILE_ADV_STOP(pStat, a);
4451}
4452#endif
4453
4454/**
4455 * Raise an RC, force rem exit.
4456 *
4457 * @param pVM VM handle.
4458 * @param rc The rc.
4459 */
4460void remR3RaiseRC(PVM pVM, int rc)
4461{
4462 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4463 Assert(pVM->rem.s.fInREM);
4464 VM_ASSERT_EMT(pVM);
4465 pVM->rem.s.rc = rc;
4466 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4467}
4468
4469
4470/* -+- timers -+- */
4471
4472uint64_t cpu_get_tsc(CPUX86State *env)
4473{
4474 STAM_COUNTER_INC(&gStatCpuGetTSC);
4475 return TMCpuTickGet(env->pVCpu);
4476}
4477
4478
4479/* -+- interrupts -+- */
4480
4481void cpu_set_ferr(CPUX86State *env)
4482{
4483 int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
4484 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4485}
4486
4487int cpu_get_pic_interrupt(CPUX86State *env)
4488{
4489 uint8_t u8Interrupt;
4490 int rc;
4491
4492 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4493 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4494 * with the (a)pic.
4495 */
4496 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4497 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4498 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4499 * remove this kludge. */
4500 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4501 {
4502 rc = VINF_SUCCESS;
4503 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4504 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4505 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4506 }
4507 else
4508 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4509
4510 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4511 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4512 if (RT_SUCCESS(rc))
4513 {
4514 if (VMCPU_FF_IS_PENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4515 env->interrupt_request |= CPU_INTERRUPT_HARD;
4516 return u8Interrupt;
4517 }
4518 return -1;
4519}
4520
4521
4522/* -+- local apic -+- */
4523
4524#if 0 /* CPUMSetGuestMsr does this now. */
4525void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4526{
4527 int rc = PDMApicSetBase(env->pVM, val);
4528 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4529}
4530#endif
4531
4532uint64_t cpu_get_apic_base(CPUX86State *env)
4533{
4534 uint64_t u64;
4535 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(env->pVCpu, MSR_IA32_APICBASE, &u64);
4536 if (RT_SUCCESS(rcStrict))
4537 {
4538 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4539 return u64;
4540 }
4541 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
4542 return 0;
4543}
4544
4545void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4546{
4547 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4548 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4549}
4550
4551uint8_t cpu_get_apic_tpr(CPUX86State *env)
4552{
4553 uint8_t u8;
4554 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL, NULL);
4555 if (RT_SUCCESS(rc))
4556 {
4557 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4558 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4559 }
4560 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4561 return 0;
4562}
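
/* Worked example (illustrative, not built): the task priority MMIO register
 * keeps the priority class in bits 7-4 while CR8 exposes it in bits 3-0, so
 * a TPR register value of 0x80 reads back as CR8 value 8 and vice versa. */
#if 0
    uint8_t const uTpr = 0x80;      /* APIC TPR register value */
    uint8_t const uCr8 = uTpr >> 4; /* == 8, what the guest sees in CR8 */
    Assert((uint8_t)(uCr8 << 4) == uTpr);
#endif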
4563
4564/**
4565 * Read an MSR.
4566 *
4567 * @retval 0 success.
4568 * @retval -1 failure, raise \#GP(0).
4569 * @param env The cpu state.
4570 * @param idMsr The MSR to read.
4571 * @param puValue Where to return the value.
4572 */
4573int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4574{
4575 Assert(env->pVCpu);
4576 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4577}
4578
4579/**
4580 * Write to an MSR.
4581 *
4582 * @retval 0 success.
4583 * @retval -1 failure, raise \#GP(0).
4584 * @param env The cpu state.
4585 * @param idMsr The MSR to write.
4586 * @param uValue The value to write.
4587 */
4588int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4589{
4590 Assert(env->pVCpu);
4591 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4592}
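
/* Minimal usage sketch (hypothetical, not built): how a caller maps the 0/-1
 * convention documented above onto a #GP(0); raise_exception() is assumed to
 * be the usual qemu helper for this. */
#if 0
    uint64_t uValue;
    if (cpu_rdmsr(env, idMsr, &uValue) != 0)
        raise_exception(EXCP0D_GPF); /* -1 -> raise #GP(0) */
#endif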
4593
4594/* -+- I/O Ports -+- */
4595
4596#undef LOG_GROUP
4597#define LOG_GROUP LOG_GROUP_REM_IOPORT
4598
4599void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4600{
4601 int rc;
4602
4603 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4604 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4605
4606 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 1);
4607 if (RT_LIKELY(rc == VINF_SUCCESS))
4608 return;
4609 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4610 {
4611 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4612 remR3RaiseRC(env->pVM, rc);
4613 return;
4614 }
4615 remAbort(rc, __FUNCTION__);
4616}
4617
4618void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4619{
4620 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4621 int rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 2);
4622 if (RT_LIKELY(rc == VINF_SUCCESS))
4623 return;
4624 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4625 {
4626 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4627 remR3RaiseRC(env->pVM, rc);
4628 return;
4629 }
4630 remAbort(rc, __FUNCTION__);
4631}
4632
4633void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4634{
4635 int rc;
4636 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4637 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 4);
4638 if (RT_LIKELY(rc == VINF_SUCCESS))
4639 return;
4640 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4641 {
4642 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4643 remR3RaiseRC(env->pVM, rc);
4644 return;
4645 }
4646 remAbort(rc, __FUNCTION__);
4647}
4648
4649uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4650{
4651 uint32_t u32 = 0;
4652 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 1);
4653 if (RT_LIKELY(rc == VINF_SUCCESS))
4654 {
4655 if (/*addr != 0x61 && */addr != 0x71)
4656 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4657 return (uint8_t)u32;
4658 }
4659 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4660 {
4661 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4662 remR3RaiseRC(env->pVM, rc);
4663 return (uint8_t)u32;
4664 }
4665 remAbort(rc, __FUNCTION__);
4666 return UINT8_C(0xff);
4667}
4668
4669uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4670{
4671 uint32_t u32 = 0;
4672 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 2);
4673 if (RT_LIKELY(rc == VINF_SUCCESS))
4674 {
4675 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4676 return (uint16_t)u32;
4677 }
4678 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4679 {
4680 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4681 remR3RaiseRC(env->pVM, rc);
4682 return (uint16_t)u32;
4683 }
4684 remAbort(rc, __FUNCTION__);
4685 return UINT16_C(0xffff);
4686}
4687
4688uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4689{
4690 uint32_t u32 = 0;
4691 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 4);
4692 if (RT_LIKELY(rc == VINF_SUCCESS))
4693 {
4694 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4695 return u32;
4696 }
4697 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4698 {
4699 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4700 remR3RaiseRC(env->pVM, rc);
4701 return u32;
4702 }
4703 remAbort(rc, __FUNCTION__);
4704 return UINT32_C(0xffffffff);
4705}
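
/* Note (illustrative): on a fatal status the three input helpers above return
 * all ones (0xff/0xffff/0xffffffff), matching what a read from an unclaimed
 * ISA bus floats to on real hardware. */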
4706
4707#undef LOG_GROUP
4708#define LOG_GROUP LOG_GROUP_REM
4709
4710
4711/* -+- helpers and misc other interfaces -+- */
4712
4713/**
4714 * Perform the CPUID instruction.
4715 *
4716 * @param env Pointer to the recompiler CPU structure.
4717 * @param idx The CPUID leaf (eax).
4718 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4719 * @param pvEAX Where to store eax.
4720 * @param pvEBX Where to store ebx.
4721 * @param pvECX Where to store ecx.
4722 * @param pvEDX Where to store edx.
4723 */
4724void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4725 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4726{
4727 NOREF(idxSub);
4728 CPUMGetGuestCpuId(env->pVCpu, idx, idxSub, pEAX, pEBX, pECX, pEDX);
4729}
4730
4731
4732#if 0 /* not used */
4733/**
4734 * Interface for qemu hardware to report back fatal errors.
4735 */
4736void hw_error(const char *pszFormat, ...)
4737{
4738 /*
4739 * Bitch about it.
4740 */
4741 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4742 * this in my Odin32 tree at home! */
4743 va_list args;
4744 va_start(args, pszFormat);
4745 RTLogPrintf("fatal error in virtual hardware:");
4746 RTLogPrintfV(pszFormat, args);
4747 va_end(args);
4748 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4749
4750 /*
4751 * If we're in REM context we'll sync back the state before 'jumping' to
4752 * the EMs failure handling.
4753 */
4754 PVM pVM = cpu_single_env->pVM;
4755 if (pVM->rem.s.fInREM)
4756 REMR3StateBack(pVM);
4757 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4758 AssertMsgFailed(("EMR3FatalError returned!\n"));
4759}
4760#endif
4761
4762/**
4763 * Interface for the qemu cpu to report unhandled situation
4764 * raising a fatal VM error.
4765 */
4766void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4767{
4768 va_list va;
4769 PVM pVM;
4770 PVMCPU pVCpu;
4771 char szMsg[256];
4772
4773 /*
4774 * Bitch about it.
4775 */
4776 RTLogFlags(NULL, "nodisabled nobuffered");
4777 RTLogFlush(NULL);
4778
4779 va_start(va, pszFormat);
4780#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4781 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4782 unsigned cArgs = 0;
4783 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4784 const char *psz = strchr(pszFormat, '%');
4785 while (psz && cArgs < 6)
4786 {
4787 auArgs[cArgs++] = va_arg(va, uintptr_t);
4788 psz = strchr(psz + 1, '%');
4789 }
4790 switch (cArgs)
4791 {
4792 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4793 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4794 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4795 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4796 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4797 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4798 default:
4799 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4800 }
4801#else
4802 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4803#endif
4804 va_end(va);
4805
4806 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4807 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4808
4809 /*
4810 * If we're in REM context we'll sync back the state before 'jumping' to
4811 * the EMs failure handling.
4812 */
4813 pVM = cpu_single_env->pVM;
4814 pVCpu = cpu_single_env->pVCpu;
4815 Assert(pVCpu);
4816
4817 if (pVM->rem.s.fInREM)
4818 REMR3StateBack(pVM, pVCpu);
4819 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4820 AssertMsgFailed(("EMR3FatalError returned!\n"));
4821}
4822
4823
4824/**
4825 * Aborts the VM.
4826 *
4827 * @param rc VBox error code.
4828 * @param pszTip Hint about why/when this happened.
4829 */
4830void remAbort(int rc, const char *pszTip)
4831{
4832 PVM pVM;
4833 PVMCPU pVCpu;
4834
4835 /*
4836 * Bitch about it.
4837 */
4838 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4839 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4840
4841 /*
4842 * Jump back to where we entered the recompiler.
4843 */
4844 pVM = cpu_single_env->pVM;
4845 pVCpu = cpu_single_env->pVCpu;
4846 Assert(pVCpu);
4847
4848 if (pVM->rem.s.fInREM)
4849 REMR3StateBack(pVM, pVCpu);
4850
4851 EMR3FatalError(pVCpu, rc);
4852 AssertMsgFailed(("EMR3FatalError returned!\n"));
4853}
4854
4855
4856/**
4857 * Dumps a linux system call.
4858 * @param pVCpu VMCPU handle.
4859 */
4860void remR3DumpLnxSyscall(PVMCPU pVCpu)
4861{
4862 static const char *apsz[] =
4863 {
4864 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4865 "sys_exit",
4866 "sys_fork",
4867 "sys_read",
4868 "sys_write",
4869 "sys_open", /* 5 */
4870 "sys_close",
4871 "sys_waitpid",
4872 "sys_creat",
4873 "sys_link",
4874 "sys_unlink", /* 10 */
4875 "sys_execve",
4876 "sys_chdir",
4877 "sys_time",
4878 "sys_mknod",
4879 "sys_chmod", /* 15 */
4880 "sys_lchown16",
4881 "sys_ni_syscall", /* old break syscall holder */
4882 "sys_stat",
4883 "sys_lseek",
4884 "sys_getpid", /* 20 */
4885 "sys_mount",
4886 "sys_oldumount",
4887 "sys_setuid16",
4888 "sys_getuid16",
4889 "sys_stime", /* 25 */
4890 "sys_ptrace",
4891 "sys_alarm",
4892 "sys_fstat",
4893 "sys_pause",
4894 "sys_utime", /* 30 */
4895 "sys_ni_syscall", /* old stty syscall holder */
4896 "sys_ni_syscall", /* old gtty syscall holder */
4897 "sys_access",
4898 "sys_nice",
4899 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4900 "sys_sync",
4901 "sys_kill",
4902 "sys_rename",
4903 "sys_mkdir",
4904 "sys_rmdir", /* 40 */
4905 "sys_dup",
4906 "sys_pipe",
4907 "sys_times",
4908 "sys_ni_syscall", /* old prof syscall holder */
4909 "sys_brk", /* 45 */
4910 "sys_setgid16",
4911 "sys_getgid16",
4912 "sys_signal",
4913 "sys_geteuid16",
4914 "sys_getegid16", /* 50 */
4915 "sys_acct",
4916 "sys_umount", /* recycled never used phys() */
4917 "sys_ni_syscall", /* old lock syscall holder */
4918 "sys_ioctl",
4919 "sys_fcntl", /* 55 */
4920 "sys_ni_syscall", /* old mpx syscall holder */
4921 "sys_setpgid",
4922 "sys_ni_syscall", /* old ulimit syscall holder */
4923 "sys_olduname",
4924 "sys_umask", /* 60 */
4925 "sys_chroot",
4926 "sys_ustat",
4927 "sys_dup2",
4928 "sys_getppid",
4929 "sys_getpgrp", /* 65 */
4930 "sys_setsid",
4931 "sys_sigaction",
4932 "sys_sgetmask",
4933 "sys_ssetmask",
4934 "sys_setreuid16", /* 70 */
4935 "sys_setregid16",
4936 "sys_sigsuspend",
4937 "sys_sigpending",
4938 "sys_sethostname",
4939 "sys_setrlimit", /* 75 */
4940 "sys_old_getrlimit",
4941 "sys_getrusage",
4942 "sys_gettimeofday",
4943 "sys_settimeofday",
4944 "sys_getgroups16", /* 80 */
4945 "sys_setgroups16",
4946 "old_select",
4947 "sys_symlink",
4948 "sys_lstat",
4949 "sys_readlink", /* 85 */
4950 "sys_uselib",
4951 "sys_swapon",
4952 "sys_reboot",
4953 "old_readdir",
4954 "old_mmap", /* 90 */
4955 "sys_munmap",
4956 "sys_truncate",
4957 "sys_ftruncate",
4958 "sys_fchmod",
4959 "sys_fchown16", /* 95 */
4960 "sys_getpriority",
4961 "sys_setpriority",
4962 "sys_ni_syscall", /* old profil syscall holder */
4963 "sys_statfs",
4964 "sys_fstatfs", /* 100 */
4965 "sys_ioperm",
4966 "sys_socketcall",
4967 "sys_syslog",
4968 "sys_setitimer",
4969 "sys_getitimer", /* 105 */
4970 "sys_newstat",
4971 "sys_newlstat",
4972 "sys_newfstat",
4973 "sys_uname",
4974 "sys_iopl", /* 110 */
4975 "sys_vhangup",
4976 "sys_ni_syscall", /* old "idle" system call */
4977 "sys_vm86old",
4978 "sys_wait4",
4979 "sys_swapoff", /* 115 */
4980 "sys_sysinfo",
4981 "sys_ipc",
4982 "sys_fsync",
4983 "sys_sigreturn",
4984 "sys_clone", /* 120 */
4985 "sys_setdomainname",
4986 "sys_newuname",
4987 "sys_modify_ldt",
4988 "sys_adjtimex",
4989 "sys_mprotect", /* 125 */
4990 "sys_sigprocmask",
4991 "sys_ni_syscall", /* old "create_module" */
4992 "sys_init_module",
4993 "sys_delete_module",
4994 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4995 "sys_quotactl",
4996 "sys_getpgid",
4997 "sys_fchdir",
4998 "sys_bdflush",
4999 "sys_sysfs", /* 135 */
5000 "sys_personality",
5001 "sys_ni_syscall", /* reserved for afs_syscall */
5002 "sys_setfsuid16",
5003 "sys_setfsgid16",
5004 "sys_llseek", /* 140 */
5005 "sys_getdents",
5006 "sys_select",
5007 "sys_flock",
5008 "sys_msync",
5009 "sys_readv", /* 145 */
5010 "sys_writev",
5011 "sys_getsid",
5012 "sys_fdatasync",
5013 "sys_sysctl",
5014 "sys_mlock", /* 150 */
5015 "sys_munlock",
5016 "sys_mlockall",
5017 "sys_munlockall",
5018 "sys_sched_setparam",
5019 "sys_sched_getparam", /* 155 */
5020 "sys_sched_setscheduler",
5021 "sys_sched_getscheduler",
5022 "sys_sched_yield",
5023 "sys_sched_get_priority_max",
5024 "sys_sched_get_priority_min", /* 160 */
5025 "sys_sched_rr_get_interval",
5026 "sys_nanosleep",
5027 "sys_mremap",
5028 "sys_setresuid16",
5029 "sys_getresuid16", /* 165 */
5030 "sys_vm86",
5031 "sys_ni_syscall", /* Old sys_query_module */
5032 "sys_poll",
5033 "sys_nfsservctl",
5034 "sys_setresgid16", /* 170 */
5035 "sys_getresgid16",
5036 "sys_prctl",
5037 "sys_rt_sigreturn",
5038 "sys_rt_sigaction",
5039 "sys_rt_sigprocmask", /* 175 */
5040 "sys_rt_sigpending",
5041 "sys_rt_sigtimedwait",
5042 "sys_rt_sigqueueinfo",
5043 "sys_rt_sigsuspend",
5044 "sys_pread64", /* 180 */
5045 "sys_pwrite64",
5046 "sys_chown16",
5047 "sys_getcwd",
5048 "sys_capget",
5049 "sys_capset", /* 185 */
5050 "sys_sigaltstack",
5051 "sys_sendfile",
5052 "sys_ni_syscall", /* reserved for streams1 */
5053 "sys_ni_syscall", /* reserved for streams2 */
5054 "sys_vfork", /* 190 */
5055 "sys_getrlimit",
5056 "sys_mmap2",
5057 "sys_truncate64",
5058 "sys_ftruncate64",
5059 "sys_stat64", /* 195 */
5060 "sys_lstat64",
5061 "sys_fstat64",
5062 "sys_lchown",
5063 "sys_getuid",
5064 "sys_getgid", /* 200 */
5065 "sys_geteuid",
5066 "sys_getegid",
5067 "sys_setreuid",
5068 "sys_setregid",
5069 "sys_getgroups", /* 205 */
5070 "sys_setgroups",
5071 "sys_fchown",
5072 "sys_setresuid",
5073 "sys_getresuid",
5074 "sys_setresgid", /* 210 */
5075 "sys_getresgid",
5076 "sys_chown",
5077 "sys_setuid",
5078 "sys_setgid",
5079 "sys_setfsuid", /* 215 */
5080 "sys_setfsgid",
5081 "sys_pivot_root",
5082 "sys_mincore",
5083 "sys_madvise",
5084 "sys_getdents64", /* 220 */
5085 "sys_fcntl64",
5086 "sys_ni_syscall", /* reserved for TUX */
5087 "sys_ni_syscall",
5088 "sys_gettid",
5089 "sys_readahead", /* 225 */
5090 "sys_setxattr",
5091 "sys_lsetxattr",
5092 "sys_fsetxattr",
5093 "sys_getxattr",
5094 "sys_lgetxattr", /* 230 */
5095 "sys_fgetxattr",
5096 "sys_listxattr",
5097 "sys_llistxattr",
5098 "sys_flistxattr",
5099 "sys_removexattr", /* 235 */
5100 "sys_lremovexattr",
5101 "sys_fremovexattr",
5102 "sys_tkill",
5103 "sys_sendfile64",
5104 "sys_futex", /* 240 */
5105 "sys_sched_setaffinity",
5106 "sys_sched_getaffinity",
5107 "sys_set_thread_area",
5108 "sys_get_thread_area",
5109 "sys_io_setup", /* 245 */
5110 "sys_io_destroy",
5111 "sys_io_getevents",
5112 "sys_io_submit",
5113 "sys_io_cancel",
5114 "sys_fadvise64", /* 250 */
5115 "sys_ni_syscall",
5116 "sys_exit_group",
5117 "sys_lookup_dcookie",
5118 "sys_epoll_create",
5119 "sys_epoll_ctl", /* 255 */
5120 "sys_epoll_wait",
5121 "sys_remap_file_pages",
5122 "sys_set_tid_address",
5123 "sys_timer_create",
5124 "sys_timer_settime", /* 260 */
5125 "sys_timer_gettime",
5126 "sys_timer_getoverrun",
5127 "sys_timer_delete",
5128 "sys_clock_settime",
5129 "sys_clock_gettime", /* 265 */
5130 "sys_clock_getres",
5131 "sys_clock_nanosleep",
5132 "sys_statfs64",
5133 "sys_fstatfs64",
5134 "sys_tgkill", /* 270 */
5135 "sys_utimes",
5136 "sys_fadvise64_64",
5137 "sys_ni_syscall" /* sys_vserver */
5138 };
5139
5140 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5141 switch (uEAX)
5142 {
5143 default:
5144 if (uEAX < RT_ELEMENTS(apsz))
5145 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5146 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5147 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5148 else
5149 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5150 break;
5151
5152 }
5153}
5154
5155
5156/**
5157 * Dumps an OpenBSD system call.
5158 * @param pVCpu VMCPU handle.
5159 */
5160void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5161{
5162 static const char *apsz[] =
5163 {
5164 "SYS_syscall", //0
5165 "SYS_exit", //1
5166 "SYS_fork", //2
5167 "SYS_read", //3
5168 "SYS_write", //4
5169 "SYS_open", //5
5170 "SYS_close", //6
5171 "SYS_wait4", //7
5172 "SYS_8",
5173 "SYS_link", //9
5174 "SYS_unlink", //10
5175 "SYS_11",
5176 "SYS_chdir", //12
5177 "SYS_fchdir", //13
5178 "SYS_mknod", //14
5179 "SYS_chmod", //15
5180 "SYS_chown", //16
5181 "SYS_break", //17
5182 "SYS_18",
5183 "SYS_19",
5184 "SYS_getpid", //20
5185 "SYS_mount", //21
5186 "SYS_unmount", //22
5187 "SYS_setuid", //23
5188 "SYS_getuid", //24
5189 "SYS_geteuid", //25
5190 "SYS_ptrace", //26
5191 "SYS_recvmsg", //27
5192 "SYS_sendmsg", //28
5193 "SYS_recvfrom", //29
5194 "SYS_accept", //30
5195 "SYS_getpeername", //31
5196 "SYS_getsockname", //32
5197 "SYS_access", //33
5198 "SYS_chflags", //34
5199 "SYS_fchflags", //35
5200 "SYS_sync", //36
5201 "SYS_kill", //37
5202 "SYS_38",
5203 "SYS_getppid", //39
5204 "SYS_40",
5205 "SYS_dup", //41
5206 "SYS_opipe", //42
5207 "SYS_getegid", //43
5208 "SYS_profil", //44
5209 "SYS_ktrace", //45
5210 "SYS_sigaction", //46
5211 "SYS_getgid", //47
5212 "SYS_sigprocmask", //48
5213 "SYS_getlogin", //49
5214 "SYS_setlogin", //50
5215 "SYS_acct", //51
5216 "SYS_sigpending", //52
5217 "SYS_osigaltstack", //53
5218 "SYS_ioctl", //54
5219 "SYS_reboot", //55
5220 "SYS_revoke", //56
5221 "SYS_symlink", //57
5222 "SYS_readlink", //58
5223 "SYS_execve", //59
5224 "SYS_umask", //60
5225 "SYS_chroot", //61
5226 "SYS_62",
5227 "SYS_63",
5228 "SYS_64",
5229 "SYS_65",
5230 "SYS_vfork", //66
5231 "SYS_67",
5232 "SYS_68",
5233 "SYS_sbrk", //69
5234 "SYS_sstk", //70
5235 "SYS_61",
5236 "SYS_vadvise", //72
5237 "SYS_munmap", //73
5238 "SYS_mprotect", //74
5239 "SYS_madvise", //75
5240 "SYS_76",
5241 "SYS_77",
5242 "SYS_mincore", //78
5243 "SYS_getgroups", //79
5244 "SYS_setgroups", //80
5245 "SYS_getpgrp", //81
5246 "SYS_setpgid", //82
5247 "SYS_setitimer", //83
5248 "SYS_84",
5249 "SYS_85",
5250 "SYS_getitimer", //86
5251 "SYS_87",
5252 "SYS_88",
5253 "SYS_89",
5254 "SYS_dup2", //90
5255 "SYS_91",
5256 "SYS_fcntl", //92
5257 "SYS_select", //93
5258 "SYS_94",
5259 "SYS_fsync", //95
5260 "SYS_setpriority", //96
5261 "SYS_socket", //97
5262 "SYS_connect", //98
5263 "SYS_99",
5264 "SYS_getpriority", //100
5265 "SYS_101",
5266 "SYS_102",
5267 "SYS_sigreturn", //103
5268 "SYS_bind", //104
5269 "SYS_setsockopt", //105
5270 "SYS_listen", //106
5271 "SYS_107",
5272 "SYS_108",
5273 "SYS_109",
5274 "SYS_110",
5275 "SYS_sigsuspend", //111
5276 "SYS_112",
5277 "SYS_113",
5278 "SYS_114",
5279 "SYS_115",
5280 "SYS_gettimeofday", //116
5281 "SYS_getrusage", //117
5282 "SYS_getsockopt", //118
5283 "SYS_119",
5284 "SYS_readv", //120
5285 "SYS_writev", //121
5286 "SYS_settimeofday", //122
5287 "SYS_fchown", //123
5288 "SYS_fchmod", //124
5289 "SYS_125",
5290 "SYS_setreuid", //126
5291 "SYS_setregid", //127
5292 "SYS_rename", //128
5293 "SYS_129",
5294 "SYS_130",
5295 "SYS_flock", //131
5296 "SYS_mkfifo", //132
5297 "SYS_sendto", //133
5298 "SYS_shutdown", //134
5299 "SYS_socketpair", //135
5300 "SYS_mkdir", //136
5301 "SYS_rmdir", //137
5302 "SYS_utimes", //138
5303 "SYS_139",
5304 "SYS_adjtime", //140
5305 "SYS_141",
5306 "SYS_142",
5307 "SYS_143",
5308 "SYS_144",
5309 "SYS_145",
5310 "SYS_146",
5311 "SYS_setsid", //147
5312 "SYS_quotactl", //148
5313 "SYS_149",
5314 "SYS_150",
5315 "SYS_151",
5316 "SYS_152",
5317 "SYS_153",
5318 "SYS_154",
5319 "SYS_nfssvc", //155
5320 "SYS_156",
5321 "SYS_157",
5322 "SYS_158",
5323 "SYS_159",
5324 "SYS_160",
5325 "SYS_getfh", //161
5326 "SYS_162",
5327 "SYS_163",
5328 "SYS_164",
5329 "SYS_sysarch", //165
5330 "SYS_166",
5331 "SYS_167",
5332 "SYS_168",
5333 "SYS_169",
5334 "SYS_170",
5335 "SYS_171",
5336 "SYS_172",
5337 "SYS_pread", //173
5338 "SYS_pwrite", //174
5339 "SYS_175",
5340 "SYS_176",
5341 "SYS_177",
5342 "SYS_178",
5343 "SYS_179",
5344 "SYS_180",
5345 "SYS_setgid", //181
5346 "SYS_setegid", //182
5347 "SYS_seteuid", //183
5348 "SYS_lfs_bmapv", //184
5349 "SYS_lfs_markv", //185
5350 "SYS_lfs_segclean", //186
5351 "SYS_lfs_segwait", //187
5352 "SYS_188",
5353 "SYS_189",
5354 "SYS_190",
5355 "SYS_pathconf", //191
5356 "SYS_fpathconf", //192
5357 "SYS_swapctl", //193
5358 "SYS_getrlimit", //194
5359 "SYS_setrlimit", //195
5360 "SYS_getdirentries", //196
5361 "SYS_mmap", //197
5362 "SYS___syscall", //198
5363 "SYS_lseek", //199
5364 "SYS_truncate", //200
5365 "SYS_ftruncate", //201
5366 "SYS___sysctl", //202
5367 "SYS_mlock", //203
5368 "SYS_munlock", //204
5369 "SYS_205",
5370 "SYS_futimes", //206
5371 "SYS_getpgid", //207
5372 "SYS_xfspioctl", //208
5373 "SYS_209",
5374 "SYS_210",
5375 "SYS_211",
5376 "SYS_212",
5377 "SYS_213",
5378 "SYS_214",
5379 "SYS_215",
5380 "SYS_216",
5381 "SYS_217",
5382 "SYS_218",
5383 "SYS_219",
5384 "SYS_220",
5385 "SYS_semget", //221
5386 "SYS_222",
5387 "SYS_223",
5388 "SYS_224",
5389 "SYS_msgget", //225
5390 "SYS_msgsnd", //226
5391 "SYS_msgrcv", //227
5392 "SYS_shmat", //228
5393 "SYS_229",
5394 "SYS_shmdt", //230
5395 "SYS_231",
5396 "SYS_clock_gettime", //232
5397 "SYS_clock_settime", //233
5398 "SYS_clock_getres", //234
5399 "SYS_235",
5400 "SYS_236",
5401 "SYS_237",
5402 "SYS_238",
5403 "SYS_239",
5404 "SYS_nanosleep", //240
5405 "SYS_241",
5406 "SYS_242",
5407 "SYS_243",
5408 "SYS_244",
5409 "SYS_245",
5410 "SYS_246",
5411 "SYS_247",
5412 "SYS_248",
5413 "SYS_249",
5414 "SYS_minherit", //250
5415 "SYS_rfork", //251
5416 "SYS_poll", //252
5417 "SYS_issetugid", //253
5418 "SYS_lchown", //254
5419 "SYS_getsid", //255
5420 "SYS_msync", //256
5421 "SYS_257",
5422 "SYS_258",
5423 "SYS_259",
5424 "SYS_getfsstat", //260
5425 "SYS_statfs", //261
5426 "SYS_fstatfs", //262
5427 "SYS_pipe", //263
5428 "SYS_fhopen", //264
5429 "SYS_265",
5430 "SYS_fhstatfs", //266
5431 "SYS_preadv", //267
5432 "SYS_pwritev", //268
5433 "SYS_kqueue", //269
5434 "SYS_kevent", //270
5435 "SYS_mlockall", //271
5436 "SYS_munlockall", //272
5437 "SYS_getpeereid", //273
5438 "SYS_274",
5439 "SYS_275",
5440 "SYS_276",
5441 "SYS_277",
5442 "SYS_278",
5443 "SYS_279",
5444 "SYS_280",
5445 "SYS_getresuid", //281
5446 "SYS_setresuid", //282
5447 "SYS_getresgid", //283
5448 "SYS_setresgid", //284
5449 "SYS_285",
5450 "SYS_mquery", //286
5451 "SYS_closefrom", //287
5452 "SYS_sigaltstack", //288
5453 "SYS_shmget", //289
5454 "SYS_semop", //290
5455 "SYS_stat", //291
5456 "SYS_fstat", //292
5457 "SYS_lstat", //293
5458 "SYS_fhstat", //294
5459 "SYS___semctl", //295
5460 "SYS_shmctl", //296
5461 "SYS_msgctl", //297
5462 "SYS_MAXSYSCALL", //298
5463 //299
5464 //300
5465 };
5466 uint32_t uEAX;
5467 if (!LogIsEnabled())
5468 return;
5469 uEAX = CPUMGetGuestEAX(pVCpu);
5470 switch (uEAX)
5471 {
5472 default:
5473 if (uEAX < RT_ELEMENTS(apsz))
5474 {
5475 uint32_t au32Args[8] = {0};
5476 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5477 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5478 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5479 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5480 }
5481 else
5482 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5483 break;
5484 }
5485}
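/* Sketch of the stack layout the dump above assumes for 32-bit OpenBSD,
 * where syscall arguments are passed on the stack rather than in registers.
 * At the INT $0x80 in the libc stub the layout is roughly:
 *
 *     [esp+0]  return address of the libc syscall stub
 *     [esp+4]  argument 1
 *     [esp+8]  argument 2
 *     ...
 *
 * so au32Args[0] read by PGMPhysSimpleReadGCPtr is normally the stub's
 * return address and the actual arguments start at au32Args[1].
 */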
5486
5487
5488#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5489/**
5490 * The Dll main entry point (stub).
5491 */
5492bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5493{
5494 return true;
5495}
5496
5497void *memcpy(void *dst, const void *src, size_t size)
5498{
5499     uint8_t *pbDst = (uint8_t *)dst; const uint8_t *pbSrc = (const uint8_t *)src; /* don't cast away src's const */
5500 while (size-- > 0)
5501 *pbDst++ = *pbSrc++;
5502 return dst;
5503}
5504
5505#endif
5506
5507 /** Stub: the recompiler does not implement System Management Mode. */
5508 void cpu_smm_update(CPUX86State *env)
5509 {
5510     NOREF(env);
5511 }