VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 42470

Last change on this file since 42470 was 42420, checked in by vboxsync, 12 years ago

Eliminating CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID and CPUMAreHiddenSelRegsValid. Addressing some LDTR and TR things (saved state, transition to REM).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 180.6 KB
Line 
1/* $Id: VBoxRecompiler.c 42420 2012-07-26 17:33:01Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hwaccm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
57/* Don't wanna include everything. */
58extern void cpu_exec_init_all(unsigned long tb_size);
59extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
60extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
61extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
62extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
63extern void tlb_flush(CPUX86State *env, int flush_global);
64extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
65extern void sync_ldtr(CPUX86State *env1, int selector);
66
67#ifdef VBOX_STRICT
68unsigned long get_phys_page_offset(target_ulong addr);
69#endif
70
71
72/*******************************************************************************
73* Defined Constants And Macros *
74*******************************************************************************/
75
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Uses a single 80-bit struct assignment instead of a library call;
 * both pointers must reference valid X86FPUMMX storage.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)

/** How remR3RunLoggingStep operates: when defined, QEMU's own single-step
 *  machinery is used for logged stepping rather than a REM-side loop. */
#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the major emulation phases (registered in REMR3Init under
   "/PROF/REM/...", deregistered in REMR3Term). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE   gStatGCPhys2HCVirt;
static STAMCOUNTER   gStatCpuGetTSC;
/* Counters for the various reasons raw-mode execution is refused
   (see the "/REM/Refuse/..." registrations in REMR3Init). */
static STAMCOUNTER   gStatRefuseTFInhibit;
static STAMCOUNTER   gStatRefuseVM86;
static STAMCOUNTER   gStatRefusePaging;
static STAMCOUNTER   gStatRefusePAE;
static STAMCOUNTER   gStatRefuseIOPLNot0;
static STAMCOUNTER   gStatRefuseIF0;
static STAMCOUNTER   gStatRefuseCode16;
static STAMCOUNTER   gStatRefuseWP0;
static STAMCOUNTER   gStatRefuseRing1or2;
static STAMCOUNTER   gStatRefuseCanExecute;
/* Indexed by segment register (R_ES..R_GS) per the REMR3Init registrations. */
static STAMCOUNTER   gaStatRefuseStale[6];
/* Descriptor table / task register change counters. */
static STAMCOUNTER   gStatREMGDTChange;
static STAMCOUNTER   gStatREMIDTChange;
static STAMCOUNTER   gStatREMLDTRChange;
static STAMCOUNTER   gStatREMTRChange;
/* Selector synchronization counters, one slot per segment register. */
static STAMCOUNTER   gStatSelOutOfSync[6];
static STAMCOUNTER   gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER   gStatFlushTBs;
#endif
/* in exec.c */
extern uint32_t tlb_flush_count;
extern uint32_t tb_flush_count;
extern uint32_t tb_phys_invalidate_count;
153
/*
 * Global stuff.
 */

/** MMIO read callbacks.
 * Indexed by access size: [0]=byte, [1]=word, [2]=dword. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Same size indexing as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (for PGM access handler pages).
 * Same size indexing as g_apfnMMIORead. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (for PGM access handler pages).
 * Same size indexing as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0U, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors for the '.remstep' debugger command.
 * Registered once by REMR3Init via DBGCRegisterCommands. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
221
/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated with RTMemExecAlloc in REMR3Init.
 * @todo huh??? That cannot be the case on the mac... So, this
 *       point is probably not valid any longer. */
uint8_t *code_gen_prologue;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
/* NOTE(review): both branches of this #if are now identical, so the
   conditional is redundant and could be collapsed — confirm before tidying. */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif
243
244
/**
 * Initializes the REM.
 *
 * Order matters here: the REM critical section and the ignore-all counter
 * must be in place before the recompiler core is brought up, and the
 * statistics/saved-state registrations come last.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL; /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications while the recompiler core is being set up */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue for generated code; must be executable memory. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest's CPUID feature bits into the recompiler CPU state. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType    = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES", STAMUNIT_OCCURENCES, "Raw mode refused because of stale ES");
    STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale CS");
    STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale SS");
    STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale DS");
    STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale FS");
    STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale GS");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists.
     * The array is chained into a free list via idxNext; idxPendingList is
     * empty (UINT32_MAX) and the free list starts at entry 0.
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    pCur->idxNext = UINT32_MAX; /* the last record. */

    /* rc is VINF_SUCCESS here (last set by SSMR3RegisterInternal). */
    return rc;
}
450
451
452/**
453 * Finalizes the REM initialization.
454 *
455 * This is called after all components, devices and drivers has
456 * been initialized. Its main purpose it to finish the RAM related
457 * initialization.
458 *
459 * @returns VBox status code.
460 *
461 * @param pVM The VM handle.
462 */
463REMR3DECL(int) REMR3InitFinalize(PVM pVM)
464{
465 int rc;
466
467 /*
468 * Ram size & dirty bit map.
469 */
470 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
471 pVM->rem.s.fGCPhysLastRamFixed = true;
472#ifdef RT_STRICT
473 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
474#else
475 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
476#endif
477 return rc;
478}
479
/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * Allocates the dirty-page byte map covering guest RAM up to
 * pVM->rem.s.GCPhysLastRam and marks all pages dirty.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map (place an inaccessible page
 *                      region right after it to catch overruns).
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Must run before any RAM blocks have been registered with the recompiler. */
    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    cb = pVM->rem.s.GCPhysLastRam + 1;
    /* The +1 above must not have wrapped around. */
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    /* One dirty byte per guest page; GCPhysLastRam must be page aligned - 1. */
    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make everything past the page-aligned bitmap end inaccessible. */
        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the start pointer so the bitmap ENDS exactly at the guard
           boundary; bytes before the adjusted start are unused slack. */
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* initialize it - all pages start out dirty. */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
536
537
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Deregisters every statistics sample registered by REMR3Init (and only
 * those - the code buffers and critical section are not freed here).
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gaStatRefuseStale[0]);
    STAM_DEREG(pVM, &gaStatRefuseStale[1]);
    STAM_DEREG(pVM, &gaStatRefuseStale[2]);
    STAM_DEREG(pVM, &gaStatRefuseStale[3]);
    STAM_DEREG(pVM, &gaStatRefuseStale[4]);
    STAM_DEREG(pVM, &gaStatRefuseStale[5]);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
613
614
615/**
616 * The VM is being reset.
617 *
618 * For the REM component this means to call the cpu_reset() and
619 * reinitialize some state variables.
620 *
621 * @param pVM VM handle.
622 */
623REMR3DECL(void) REMR3Reset(PVM pVM)
624{
625 EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */
626
627 /*
628 * Reset the REM cpu.
629 */
630 Assert(pVM->rem.s.cIgnoreAll == 0);
631 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
632 cpu_reset(&pVM->rem.s.Env);
633 pVM->rem.s.cInvalidatedPages = 0;
634 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
635 Assert(pVM->rem.s.cIgnoreAll == 0);
636
637 /* Clear raw ring 0 init state */
638 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
639
640 /* Flush the TBs the next time we execute code here. */
641 pVM->rem.s.fFlushTBs = true;
642
643 EMRemUnlock(pVM);
644}
645
646
647/**
648 * Execute state save operation.
649 *
650 * @returns VBox status code.
651 * @param pVM VM Handle.
652 * @param pSSM SSM operation handle.
653 */
654static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
655{
656 PREM pRem = &pVM->rem.s;
657
658 /*
659 * Save the required CPU Env bits.
660 * (Not much because we're never in REM when doing the save.)
661 */
662 LogFlow(("remR3Save:\n"));
663 Assert(!pRem->fInREM);
664 SSMR3PutU32(pSSM, pRem->Env.hflags);
665 SSMR3PutU32(pSSM, ~0); /* separator */
666
667 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
668 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
669 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
670
671 return SSMR3PutU32(pSSM, ~0); /* terminator */
672}
673
674
/**
 * Execute state load operation.
 *
 * Reads back the stream written by remR3Save (plus extra, ignored fields
 * for the 1.6 format) and forces a full CPU state resync.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   uVersion        Data layout version.
 * @param   uPass           The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     *
     * NOTE(review): the early error returns below leave cIgnoreAll
     * incremented; presumably acceptable because a failed load kills the
     * VM anyway - confirm before relying on the counter afterwards.
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        /* NOTE(review): this declaration shadows the outer pVCpu above;
           harmless, but worth tidying. */
        PVMCPU pVCpu = &pVM->aCpus[i];
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
800
801
802
803#undef LOG_GROUP
804#define LOG_GROUP LOG_GROUP_REM_RUN
805
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;   /* saved; restored before we return */
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint it has to be disabled before we start
     * stepping, so that EXCP_DEBUG below unambiguously means the step completed.
     * (fBp remembers whether there was one to reinstate afterwards.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Step completed: pulse the timers (resume + suspend) so the virtual
           clock advances, then report the step to the caller. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM status code was raised; fetch it and clear the slot. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Reinstate the breakpoint we removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
890
891
892/**
893 * Set a breakpoint using the REM facilities.
894 *
895 * @returns VBox status code.
896 * @param pVM The VM handle.
897 * @param Address The breakpoint address.
898 * @thread The emulation thread.
899 */
900REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
901{
902 VM_ASSERT_EMT(pVM);
903 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
904 {
905 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
906 return VINF_SUCCESS;
907 }
908 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
909 return VERR_REM_NO_MORE_BP_SLOTS;
910}
911
912
913/**
914 * Clears a breakpoint set by REMR3BreakpointSet().
915 *
916 * @returns VBox status code.
917 * @param pVM The VM handle.
918 * @param Address The breakpoint address.
919 * @thread The emulation thread.
920 */
921REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
922{
923 VM_ASSERT_EMT(pVM);
924 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
925 {
926 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
927 return VINF_SUCCESS;
928 }
929 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
930 return VERR_REM_BP_NOT_FOUND;
931}
932
933
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest (REMR3State before executing, REMR3StateBack after).
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     */
    rc = REMR3State(pVM, pVCpu);
    pVM->rem.s.fFlushTBs = fFlushTBs;   /* restore the caller's TB flushing setting */
    if (RT_SUCCESS(rc))
    {
        /* Save the interrupt request mask; restored before switching back the state. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
        cpu_single_step(&pVM->rem.s.Env, 0);
#endif
        Assert(!pVM->rem.s.Env.singlestep_enabled);

        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVCpu);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVCpu);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're in trouble now.
             */
            case EXCP_DEBUG:
                if (pVM->rem.s.Env.watchpoint_hit)
                {
                    /** @todo deal with watchpoints */
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                    rc = VINF_EM_DBG_BREAKPOINT;
                }
                else
                {
                    /* Distinguish a breakpoint hit from a completed step by scanning
                       the breakpoint list for the current PC. */
                    CPUBreakpoint *pBP;
                    RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                    QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                        if (pBP->pc == GCPtrPC)
                            break;
                    rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                    Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
                }
                break;

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM, pVCpu);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1089
1090
1091/**
1092 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1093 *
1094 * @returns VBox status code.
1095 *
1096 * @param pVM The VM handle.
1097 * @param pVCpu The Virtual CPU handle.
1098 */
1099static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1100{
1101 int rc;
1102
1103 Assert(pVM->rem.s.fInREM);
1104#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1105 cpu_single_step(&pVM->rem.s.Env, 1);
1106#else
1107 Assert(!pVM->rem.s.Env.singlestep_enabled);
1108#endif
1109
1110 /*
1111 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1112 */
1113 for (;;)
1114 {
1115 char szBuf[256];
1116
1117 /*
1118 * Log the current registers state and instruction.
1119 */
1120 remR3StateUpdate(pVM, pVCpu);
1121 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1122 szBuf[0] = '\0';
1123 rc = DBGFR3DisasInstrEx(pVM,
1124 pVCpu->idCpu,
1125 0, /* Sel */
1126 0, /* GCPtr */
1127 DBGF_DISAS_FLAGS_CURRENT_GUEST
1128 | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1129 szBuf,
1130 sizeof(szBuf),
1131 NULL);
1132 if (RT_FAILURE(rc))
1133 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1134 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1135
1136 /*
1137 * Execute the instruction.
1138 */
1139 TMNotifyStartOfExecution(pVCpu);
1140
1141 if ( pVM->rem.s.Env.exception_index < 0
1142 || pVM->rem.s.Env.exception_index > 256)
1143 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1144
1145#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1146 pVM->rem.s.Env.interrupt_request = 0;
1147#else
1148 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1149#endif
1150 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1151 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1152 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1153 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1154 pVM->rem.s.Env.interrupt_request,
1155 pVM->rem.s.Env.halted,
1156 pVM->rem.s.Env.exception_index
1157 );
1158
1159 rc = cpu_exec(&pVM->rem.s.Env);
1160
1161 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1162 pVM->rem.s.Env.interrupt_request,
1163 pVM->rem.s.Env.halted,
1164 pVM->rem.s.Env.exception_index
1165 );
1166
1167 TMNotifyEndOfExecution(pVCpu);
1168
1169 switch (rc)
1170 {
1171#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1172 /*
1173 * The normal exit.
1174 */
1175 case EXCP_SINGLE_INSTR:
1176 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1177 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1178 continue;
1179 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1180 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1181 rc = VINF_SUCCESS;
1182 break;
1183
1184#else
1185 /*
1186 * The normal exit, check for breakpoints at PC just to be sure.
1187 */
1188#endif
1189 case EXCP_DEBUG:
1190 if (pVM->rem.s.Env.watchpoint_hit)
1191 {
1192 /** @todo deal with watchpoints */
1193 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1194 rc = VINF_EM_DBG_BREAKPOINT;
1195 }
1196 else
1197 {
1198 CPUBreakpoint *pBP;
1199 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1200 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1201 if (pBP->pc == GCPtrPC)
1202 break;
1203 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1204 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1205 }
1206#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1207 if (rc == VINF_EM_DBG_STEPPED)
1208 {
1209 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1210 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1211 continue;
1212
1213 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1214 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1215 rc = VINF_SUCCESS;
1216 }
1217#endif
1218 break;
1219
1220 /*
1221 * If we take a trap or start servicing a pending interrupt, we might end up here.
1222 * (Timer thread or some other thread wishing EMT's attention.)
1223 */
1224 case EXCP_INTERRUPT:
1225 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1226 rc = VINF_SUCCESS;
1227 break;
1228
1229 /*
1230 * hlt instruction.
1231 */
1232 case EXCP_HLT:
1233 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1234 rc = VINF_EM_HALT;
1235 break;
1236
1237 /*
1238 * The VM has halted.
1239 */
1240 case EXCP_HALTED:
1241 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1242 rc = VINF_EM_HALT;
1243 break;
1244
1245 /*
1246 * Switch to RAW-mode.
1247 */
1248 case EXCP_EXECUTE_RAW:
1249 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1250 rc = VINF_EM_RESCHEDULE_RAW;
1251 break;
1252
1253 /*
1254 * Switch to hardware accelerated RAW-mode.
1255 */
1256 case EXCP_EXECUTE_HWACC:
1257 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1258 rc = VINF_EM_RESCHEDULE_HWACC;
1259 break;
1260
1261 /*
1262 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1263 */
1264 case EXCP_RC:
1265 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1266 rc = pVM->rem.s.rc;
1267 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1268 break;
1269
1270 /*
1271 * Figure out the rest when they arrive....
1272 */
1273 default:
1274 AssertMsgFailed(("rc=%d\n", rc));
1275 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1276 rc = VINF_EM_RESCHEDULE;
1277 break;
1278 }
1279 break;
1280 }
1281
1282#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1283// cpu_single_step(&pVM->rem.s.Env, 0);
1284#else
1285 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1286#endif
1287 return rc;
1288}
1289
1290
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 * @param   pVCpu       VMCPU Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
{
    int rc;

    /* Single-step + logging mode gets its own, much slower, loop. */
    if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
        return remR3RunLoggingStep(pVM, pVCpu);

    Assert(pVM->rem.s.fInREM);
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));

    /*
     * Run the recompiler and translate the cpu_exec exit code into a
     * VBox status code.
     */
    TMNotifyStartOfExecution(pVCpu);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVCpu);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
            if (pVM->rem.s.Env.watchpoint_hit)
            {
                /** @todo deal with watchpoints */
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
                rc = VINF_EM_DBG_BREAKPOINT;
            }
            else
            {
                /* Distinguish a breakpoint hit from a completed step by scanning
                   the breakpoint list for the current PC. */
                CPUBreakpoint *pBP;
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
                    if (pBP->pc == GCPtrPC)
                        break;
                rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
                Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
            }
            break;

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
1404
1405
1406/**
1407 * Check if the cpu state is suitable for Raw execution.
1408 *
1409 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1410 *
1411 * @param env The CPU env struct.
1412 * @param eip The EIP to check this for (might differ from env->eip).
1413 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1414 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1415 *
1416 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1417 */
1418bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
1419{
1420 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1421 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1422 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1423 uint32_t u32CR0;
1424
1425#ifdef IEM_VERIFICATION_MODE
1426 return false;
1427#endif
1428
1429 /* Update counter. */
1430 env->pVM->rem.s.cCanExecuteRaw++;
1431
1432 /* Never when single stepping+logging guest code. */
1433 if (env->state & CPU_EMULATE_SINGLE_STEP)
1434 return false;
1435
1436 if (HWACCMIsEnabled(env->pVM))
1437 {
1438 CPUMCTX Ctx;
1439
1440 env->state |= CPU_RAW_HWACC;
1441
1442 /*
1443 * The simple check first...
1444 */
1445 if (!EMIsHwVirtExecutionEnabled(env->pVM))
1446 return false;
1447
1448 /*
1449 * Create partial context for HWACCMR3CanExecuteGuest
1450 */
1451 Ctx.cr0 = env->cr[0];
1452 Ctx.cr3 = env->cr[3];
1453 Ctx.cr4 = env->cr[4];
1454
1455 Ctx.tr.Sel = env->tr.selector;
1456 Ctx.tr.ValidSel = env->tr.selector;
1457 Ctx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1458 Ctx.tr.u64Base = env->tr.base;
1459 Ctx.tr.u32Limit = env->tr.limit;
1460 Ctx.tr.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1461
1462 Ctx.ldtr.Sel = env->ldt.selector;
1463 Ctx.ldtr.ValidSel = env->ldt.selector;
1464 Ctx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1465 Ctx.ldtr.u64Base = env->ldt.base;
1466 Ctx.ldtr.u32Limit = env->ldt.limit;
1467 Ctx.ldtr.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1468
1469 Ctx.idtr.cbIdt = env->idt.limit;
1470 Ctx.idtr.pIdt = env->idt.base;
1471
1472 Ctx.gdtr.cbGdt = env->gdt.limit;
1473 Ctx.gdtr.pGdt = env->gdt.base;
1474
1475 Ctx.rsp = env->regs[R_ESP];
1476 Ctx.rip = env->eip;
1477
1478 Ctx.eflags.u32 = env->eflags;
1479
1480 Ctx.cs.Sel = env->segs[R_CS].selector;
1481 Ctx.cs.ValidSel = env->segs[R_CS].selector;
1482 Ctx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1483 Ctx.cs.u64Base = env->segs[R_CS].base;
1484 Ctx.cs.u32Limit = env->segs[R_CS].limit;
1485 Ctx.cs.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1486
1487 Ctx.ds.Sel = env->segs[R_DS].selector;
1488 Ctx.ds.ValidSel = env->segs[R_DS].selector;
1489 Ctx.ds.fFlags = CPUMSELREG_FLAGS_VALID;
1490 Ctx.ds.u64Base = env->segs[R_DS].base;
1491 Ctx.ds.u32Limit = env->segs[R_DS].limit;
1492 Ctx.ds.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1493
1494 Ctx.es.Sel = env->segs[R_ES].selector;
1495 Ctx.es.ValidSel = env->segs[R_ES].selector;
1496 Ctx.es.fFlags = CPUMSELREG_FLAGS_VALID;
1497 Ctx.es.u64Base = env->segs[R_ES].base;
1498 Ctx.es.u32Limit = env->segs[R_ES].limit;
1499 Ctx.es.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1500
1501 Ctx.fs.Sel = env->segs[R_FS].selector;
1502 Ctx.fs.ValidSel = env->segs[R_FS].selector;
1503 Ctx.fs.fFlags = CPUMSELREG_FLAGS_VALID;
1504 Ctx.fs.u64Base = env->segs[R_FS].base;
1505 Ctx.fs.u32Limit = env->segs[R_FS].limit;
1506 Ctx.fs.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1507
1508 Ctx.gs.Sel = env->segs[R_GS].selector;
1509 Ctx.gs.ValidSel = env->segs[R_GS].selector;
1510 Ctx.gs.fFlags = CPUMSELREG_FLAGS_VALID;
1511 Ctx.gs.u64Base = env->segs[R_GS].base;
1512 Ctx.gs.u32Limit = env->segs[R_GS].limit;
1513 Ctx.gs.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1514
1515 Ctx.ss.Sel = env->segs[R_SS].selector;
1516 Ctx.ss.ValidSel = env->segs[R_SS].selector;
1517 Ctx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1518 Ctx.ss.u64Base = env->segs[R_SS].base;
1519 Ctx.ss.u32Limit = env->segs[R_SS].limit;
1520 Ctx.ss.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1521
1522 Ctx.msrEFER = env->efer;
1523
1524 /* Hardware accelerated raw-mode:
1525 *
1526 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1527 */
1528 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1529 {
1530 *piException = EXCP_EXECUTE_HWACC;
1531 return true;
1532 }
1533 return false;
1534 }
1535
1536 /*
1537 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1538 * or 32 bits protected mode ring 0 code
1539 *
1540 * The tests are ordered by the likelihood of being true during normal execution.
1541 */
1542 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1543 {
1544 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1545 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1546 return false;
1547 }
1548
1549#ifndef VBOX_RAW_V86
1550 if (fFlags & VM_MASK) {
1551 STAM_COUNTER_INC(&gStatRefuseVM86);
1552 Log2(("raw mode refused: VM_MASK\n"));
1553 return false;
1554 }
1555#endif
1556
1557 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1558 {
1559#ifndef DEBUG_bird
1560 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1561#endif
1562 return false;
1563 }
1564
1565 if (env->singlestep_enabled)
1566 {
1567 //Log2(("raw mode refused: Single step\n"));
1568 return false;
1569 }
1570
1571 if (!QTAILQ_EMPTY(&env->breakpoints))
1572 {
1573 //Log2(("raw mode refused: Breakpoints\n"));
1574 return false;
1575 }
1576
1577 if (!QTAILQ_EMPTY(&env->watchpoints))
1578 {
1579 //Log2(("raw mode refused: Watchpoints\n"));
1580 return false;
1581 }
1582
1583 u32CR0 = env->cr[0];
1584 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1585 {
1586 STAM_COUNTER_INC(&gStatRefusePaging);
1587 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1588 return false;
1589 }
1590
1591 if (env->cr[4] & CR4_PAE_MASK)
1592 {
1593 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1594 {
1595 STAM_COUNTER_INC(&gStatRefusePAE);
1596 return false;
1597 }
1598 }
1599
1600 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1601 {
1602 if (!EMIsRawRing3Enabled(env->pVM))
1603 return false;
1604
1605 if (!(env->eflags & IF_MASK))
1606 {
1607 STAM_COUNTER_INC(&gStatRefuseIF0);
1608 Log2(("raw mode refused: IF (RawR3)\n"));
1609 return false;
1610 }
1611
1612 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1613 {
1614 STAM_COUNTER_INC(&gStatRefuseWP0);
1615 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1616 return false;
1617 }
1618 }
1619 else
1620 {
1621 if (!EMIsRawRing0Enabled(env->pVM))
1622 return false;
1623
1624 // Let's start with pure 32 bits ring 0 code first
1625 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1626 {
1627 STAM_COUNTER_INC(&gStatRefuseCode16);
1628 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1629 return false;
1630 }
1631
1632 // Only R0
1633 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1634 {
1635 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1636 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1637 return false;
1638 }
1639
1640 if (!(u32CR0 & CR0_WP_MASK))
1641 {
1642 STAM_COUNTER_INC(&gStatRefuseWP0);
1643 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1644 return false;
1645 }
1646
1647 if (PATMIsPatchGCAddr(env->pVM, eip))
1648 {
1649 Log2(("raw r0 mode forced: patch code\n"));
1650 *piException = EXCP_EXECUTE_RAW;
1651 return true;
1652 }
1653
1654#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1655 if (!(env->eflags & IF_MASK))
1656 {
1657 STAM_COUNTER_INC(&gStatRefuseIF0);
1658 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1659 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1660 return false;
1661 }
1662#endif
1663
1664 env->state |= CPU_RAW_RING0;
1665 }
1666
1667 /*
1668 * Don't reschedule the first time we're called, because there might be
1669 * special reasons why we're here that is not covered by the above checks.
1670 */
1671 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1672 {
1673 Log2(("raw mode refused: first scheduling\n"));
1674 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1675 return false;
1676 }
1677
1678 /*
1679 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1680 */
1681 if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1682 {
1683 Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
1684 STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
1685 return EMSTATE_REM;
1686 }
1687 if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1688 {
1689 Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
1690 STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
1691 return EMSTATE_REM;
1692 }
1693 if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1694 {
1695 Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
1696 STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
1697 return EMSTATE_REM;
1698 }
1699 if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1700 {
1701 Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
1702 STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
1703 return EMSTATE_REM;
1704 }
1705 if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1706 {
1707 Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
1708 STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
1709 return EMSTATE_REM;
1710 }
1711 if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1712 {
1713 Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
1714 STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
1715 return EMSTATE_REM;
1716 }
1717
1718/* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
1719 *piException = EXCP_EXECUTE_RAW;
1720 return true;
1721}
1722
1723
1724/**
1725 * Fetches a code byte.
1726 *
1727 * @returns Success indicator (bool) for ease of use.
1728 * @param env The CPU environment structure.
1729 * @param GCPtrInstr Where to fetch code.
1730 * @param pu8Byte Where to store the byte on success
1731 */
1732bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1733{
1734 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1735 if (RT_SUCCESS(rc))
1736 return true;
1737 return false;
1738}
1739
1740
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env     Pointer to cpu environment.
 * @param   GCPtr   The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    Assert(EMRemIsLockOwner(env->pVM));

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    Assert(pCtx);
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    /* A change in CR4.VME flags a TSS resync for SELM. */
    if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    Assert(env->pVCpu);
    rc = PGMInvalidatePage(env->pVCpu, GCPtr);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        /* Fall back to a full CR3 sync when the single-page invalidation fails. */
        VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1791
1792
1793#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address into an R3 pointer for the QEMU TLB,
 * encoding access information in the two least significant bits of the
 * returned value:
 *      - the whole value 1: no direct pointer; accesses must go through the
 *        handler path (TLB catch-all / unassigned).
 *      - bit 1 set: readable through the pointer, but writes must be caught
 *        (VINF_PGM_PHYS_TLB_CATCH_WRITE).
 *
 * @returns R3 pointer, possibly tagged as described above.
 * @param   env1        The CPU environment.
 * @param   physAddr    The guest physical address (at least 4 byte aligned).
 * @param   fWritable   Whether write access is requested.
 *                      NOTE(review): currently ignored - the call below always
 *                      asks for a writable mapping; confirm this is intended.
 */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;


    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);
    /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/

    STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
1818#endif /* REM_PHYS_ADDR_IN_TLB */
1819
1820
/**
 * Called from tlb_protect_code in order to write monitor a code page.
 *
 * Registers the page with CSAM so self-modifying code can be detected.
 * Only active when VBOX_REM_PROTECT_PAGES_FROM_SMC is defined, and only
 * for paged supervisor-mode code outside V86 mode with HWACC disabled.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    Assert(env->pVM->rem.s.fInREM);
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
         &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)       /* ignore during single instruction execution */
         &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
         &&  !(env->eflags & VM_MASK)                       /* no V86 mode */
         &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1839
1840
/**
 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
 *
 * Counterpart of remR3ProtectCode: unregisters the page from CSAM under
 * the same conditions it was registered.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   GCPtr   Code page to monitor
 */
void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
{
    Assert(env->pVM->rem.s.fInREM);
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    if (     (env->cr[0] & X86_CR0_PG)                      /* paging must be enabled */
         &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)       /* ignore during single instruction execution */
         &&  (((env->hflags >> HF_CPL_SHIFT) & 3) == 0)     /* supervisor mode only */
         &&  !(env->eflags & VM_MASK)                       /* no V86 mode */
         &&  !HWACCMIsEnabled(env->pVM))
        CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
#endif
}
1859
1860
1861/**
1862 * Called when the CPU is initialized, any of the CRx registers are changed or
1863 * when the A20 line is modified.
1864 *
1865 * @param env Pointer to the CPU environment.
1866 * @param fGlobal Set if the flush is global.
1867 */
1868void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1869{
1870 PVM pVM = env->pVM;
1871 PCPUMCTX pCtx;
1872 Assert(EMRemIsLockOwner(pVM));
1873
1874 /*
1875 * When we're replaying invlpg instructions or restoring a saved
1876 * state we disable this path.
1877 */
1878 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1879 return;
1880 Assert(pVM->rem.s.fInREM);
1881
1882 /*
1883 * The caller doesn't check cr4, so we have to do that for ourselves.
1884 */
1885 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1886 fGlobal = true;
1887 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1888
1889 /*
1890 * Update the control registers before calling PGMR3FlushTLB.
1891 */
1892 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1893 Assert(pCtx);
1894 pCtx->cr0 = env->cr[0];
1895 pCtx->cr3 = env->cr[3];
1896 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1897 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1898 pCtx->cr4 = env->cr[4];
1899
1900 /*
1901 * Let PGM do the rest.
1902 */
1903 Assert(env->pVCpu);
1904 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1905}
1906
1907
1908/**
1909 * Called when any of the cr0, cr4 or efer registers is updated.
1910 *
1911 * @param env Pointer to the CPU environment.
1912 */
1913void remR3ChangeCpuMode(CPUX86State *env)
1914{
1915 PVM pVM = env->pVM;
1916 uint64_t efer;
1917 PCPUMCTX pCtx;
1918 int rc;
1919
1920 /*
1921 * When we're replaying loads or restoring a saved
1922 * state this path is disabled.
1923 */
1924 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1925 return;
1926 Assert(pVM->rem.s.fInREM);
1927
1928 /*
1929 * Update the control registers before calling PGMChangeMode()
1930 * as it may need to map whatever cr3 is pointing to.
1931 */
1932 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1933 Assert(pCtx);
1934 pCtx->cr0 = env->cr[0];
1935 pCtx->cr3 = env->cr[3];
1936 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1937 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1938 pCtx->cr4 = env->cr[4];
1939#ifdef TARGET_X86_64
1940 efer = env->efer;
1941 pCtx->msrEFER = efer;
1942#else
1943 efer = 0;
1944#endif
1945 Assert(env->pVCpu);
1946 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1947 if (rc != VINF_SUCCESS)
1948 {
1949 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1950 {
1951 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1952 remR3RaiseRC(env->pVM, rc);
1953 }
1954 else
1955 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1956 }
1957}
1958
1959
1960/**
1961 * Called from compiled code to run dma.
1962 *
1963 * @param env Pointer to the CPU environment.
1964 */
1965void remR3DmaRun(CPUX86State *env)
1966{
1967 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1968 PDMR3DmaRun(env->pVM);
1969 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1970}
1971
1972
1973/**
1974 * Called from compiled code to schedule pending timers in VMM
1975 *
1976 * @param env Pointer to the CPU environment.
1977 */
1978void remR3TimersRun(CPUX86State *env)
1979{
1980 LogFlow(("remR3TimersRun:\n"));
1981 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1982 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1983 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1984 TMR3TimerQueuesDo(env->pVM);
1985 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1986 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1987}
1988
1989
1990/**
1991 * Record trap occurrence
1992 *
1993 * @returns VBox status code
1994 * @param env Pointer to the CPU environment.
1995 * @param uTrap Trap nr
1996 * @param uErrorCode Error code
1997 * @param pvNextEIP Next EIP
1998 */
1999int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
2000{
2001 PVM pVM = env->pVM;
2002#ifdef VBOX_WITH_STATISTICS
2003 static STAMCOUNTER s_aStatTrap[255];
2004 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
2005#endif
2006
2007#ifdef VBOX_WITH_STATISTICS
2008 if (uTrap < 255)
2009 {
2010 if (!s_aRegisters[uTrap])
2011 {
2012 char szStatName[64];
2013 s_aRegisters[uTrap] = true;
2014 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
2015 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
2016 }
2017 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
2018 }
2019#endif
2020 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2021 if( uTrap < 0x20
2022 && (env->cr[0] & X86_CR0_PE)
2023 && !(env->eflags & X86_EFL_VM))
2024 {
2025#ifdef DEBUG
2026 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
2027#endif
2028 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
2029 {
2030 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2031 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
2032 return VERR_REM_TOO_MANY_TRAPS;
2033 }
2034 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
2035 pVM->rem.s.cPendingExceptions = 1;
2036 pVM->rem.s.uPendingException = uTrap;
2037 pVM->rem.s.uPendingExcptEIP = env->eip;
2038 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2039 }
2040 else
2041 {
2042 pVM->rem.s.cPendingExceptions = 0;
2043 pVM->rem.s.uPendingException = uTrap;
2044 pVM->rem.s.uPendingExcptEIP = env->eip;
2045 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2046 }
2047 return VINF_SUCCESS;
2048}
2049
2050
2051/*
2052 * Clear current active trap
2053 *
2054 * @param pVM VM Handle.
2055 */
2056void remR3TrapClear(PVM pVM)
2057{
2058 pVM->rem.s.cPendingExceptions = 0;
2059 pVM->rem.s.uPendingException = 0;
2060 pVM->rem.s.uPendingExcptEIP = 0;
2061 pVM->rem.s.uPendingExcptCR2 = 0;
2062}
2063
2064
2065/*
2066 * Record previous call instruction addresses
2067 *
2068 * @param env Pointer to the CPU environment.
2069 */
2070void remR3RecordCall(CPUX86State *env)
2071{
2072 CSAMR3RecordCallAddress(env->pVM, env->eip);
2073}
2074
2075
2076/**
2077 * Syncs the internal REM state with the VM.
2078 *
2079 * This must be called before REMR3Run() is invoked whenever when the REM
2080 * state is not up to date. Calling it several times in a row is not
2081 * permitted.
2082 *
2083 * @returns VBox status code.
2084 *
2085 * @param pVM VM Handle.
2086 * @param pVCpu VMCPU Handle.
2087 *
2088 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2089 * no do this since the majority of the callers don't want any unnecessary of events
2090 * pending that would immediately interrupt execution.
2091 */
2092REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2093{
2094 register const CPUMCTX *pCtx;
2095 register unsigned fFlags;
2096 unsigned i;
2097 TRPMEVENT enmType;
2098 uint8_t u8TrapNo;
2099 uint32_t uCpl;
2100 int rc;
2101
2102 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2103 Log2(("REMR3State:\n"));
2104
2105 pVM->rem.s.Env.pVCpu = pVCpu;
2106 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2107
2108 Assert(!pVM->rem.s.fInREM);
2109 pVM->rem.s.fInStateSync = true;
2110
2111 /*
2112 * If we have to flush TBs, do that immediately.
2113 */
2114 if (pVM->rem.s.fFlushTBs)
2115 {
2116 STAM_COUNTER_INC(&gStatFlushTBs);
2117 tb_flush(&pVM->rem.s.Env);
2118 pVM->rem.s.fFlushTBs = false;
2119 }
2120
2121 /*
2122 * Copy the registers which require no special handling.
2123 */
2124#ifdef TARGET_X86_64
2125 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2126 Assert(R_EAX == 0);
2127 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2128 Assert(R_ECX == 1);
2129 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2130 Assert(R_EDX == 2);
2131 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2132 Assert(R_EBX == 3);
2133 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2134 Assert(R_ESP == 4);
2135 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2136 Assert(R_EBP == 5);
2137 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2138 Assert(R_ESI == 6);
2139 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2140 Assert(R_EDI == 7);
2141 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2142 pVM->rem.s.Env.regs[8] = pCtx->r8;
2143 pVM->rem.s.Env.regs[9] = pCtx->r9;
2144 pVM->rem.s.Env.regs[10] = pCtx->r10;
2145 pVM->rem.s.Env.regs[11] = pCtx->r11;
2146 pVM->rem.s.Env.regs[12] = pCtx->r12;
2147 pVM->rem.s.Env.regs[13] = pCtx->r13;
2148 pVM->rem.s.Env.regs[14] = pCtx->r14;
2149 pVM->rem.s.Env.regs[15] = pCtx->r15;
2150
2151 pVM->rem.s.Env.eip = pCtx->rip;
2152
2153 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2154#else
2155 Assert(R_EAX == 0);
2156 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2157 Assert(R_ECX == 1);
2158 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2159 Assert(R_EDX == 2);
2160 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2161 Assert(R_EBX == 3);
2162 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2163 Assert(R_ESP == 4);
2164 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2165 Assert(R_EBP == 5);
2166 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2167 Assert(R_ESI == 6);
2168 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2169 Assert(R_EDI == 7);
2170 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2171 pVM->rem.s.Env.eip = pCtx->eip;
2172
2173 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2174#endif
2175
2176 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2177
2178 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2179 for (i=0;i<8;i++)
2180 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2181
2182#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2183 /*
2184 * Clear the halted hidden flag (the interrupt waking up the CPU can
2185 * have been dispatched in raw mode).
2186 */
2187 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2188#endif
2189
2190 /*
2191 * Replay invlpg? Only if we're not flushing the TLB.
2192 */
2193 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2194 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2195 if (pVM->rem.s.cInvalidatedPages)
2196 {
2197 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
2198 {
2199 RTUINT i;
2200
2201 pVM->rem.s.fIgnoreCR3Load = true;
2202 pVM->rem.s.fIgnoreInvlPg = true;
2203 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2204 {
2205 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2206 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2207 }
2208 pVM->rem.s.fIgnoreInvlPg = false;
2209 pVM->rem.s.fIgnoreCR3Load = false;
2210 }
2211 pVM->rem.s.cInvalidatedPages = 0;
2212 }
2213
2214 /* Replay notification changes. */
2215 REMR3ReplayHandlerNotifications(pVM);
2216
2217 /* Update MSRs; before CRx registers! */
2218 pVM->rem.s.Env.efer = pCtx->msrEFER;
2219 pVM->rem.s.Env.star = pCtx->msrSTAR;
2220 pVM->rem.s.Env.pat = pCtx->msrPAT;
2221#ifdef TARGET_X86_64
2222 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2223 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2224 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2225 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2226
2227 /* Update the internal long mode activate flag according to the new EFER value. */
2228 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2229 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2230 else
2231 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2232#endif
2233
2234 /* Update the inhibit IRQ mask. */
2235 pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
2236 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2237 {
2238 RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
2239 if (InhibitPC == pCtx->rip)
2240 pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
2241 else
2242 {
2243 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
2244 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2245 }
2246 }
2247
2248 /*
2249 * Sync the A20 gate.
2250 */
2251 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2252 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2253 {
2254 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2255 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2256 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2257 }
2258
2259 /*
2260 * Registers which are rarely changed and require special handling / order when changed.
2261 */
2262 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2263 | CPUM_CHANGED_CR4
2264 | CPUM_CHANGED_CR0
2265 | CPUM_CHANGED_CR3
2266 | CPUM_CHANGED_GDTR
2267 | CPUM_CHANGED_IDTR
2268 | CPUM_CHANGED_SYSENTER_MSR
2269 | CPUM_CHANGED_LDTR
2270 | CPUM_CHANGED_CPUID
2271 | CPUM_CHANGED_FPU_REM
2272 )
2273 )
2274 {
2275 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2276 {
2277 pVM->rem.s.fIgnoreCR3Load = true;
2278 tlb_flush(&pVM->rem.s.Env, true);
2279 pVM->rem.s.fIgnoreCR3Load = false;
2280 }
2281
2282 /* CR4 before CR0! */
2283 if (fFlags & CPUM_CHANGED_CR4)
2284 {
2285 pVM->rem.s.fIgnoreCR3Load = true;
2286 pVM->rem.s.fIgnoreCpuMode = true;
2287 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2288 pVM->rem.s.fIgnoreCpuMode = false;
2289 pVM->rem.s.fIgnoreCR3Load = false;
2290 }
2291
2292 if (fFlags & CPUM_CHANGED_CR0)
2293 {
2294 pVM->rem.s.fIgnoreCR3Load = true;
2295 pVM->rem.s.fIgnoreCpuMode = true;
2296 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2297 pVM->rem.s.fIgnoreCpuMode = false;
2298 pVM->rem.s.fIgnoreCR3Load = false;
2299 }
2300
2301 if (fFlags & CPUM_CHANGED_CR3)
2302 {
2303 pVM->rem.s.fIgnoreCR3Load = true;
2304 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2305 pVM->rem.s.fIgnoreCR3Load = false;
2306 }
2307
2308 if (fFlags & CPUM_CHANGED_GDTR)
2309 {
2310 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2311 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2312 }
2313
2314 if (fFlags & CPUM_CHANGED_IDTR)
2315 {
2316 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2317 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2318 }
2319
2320 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2321 {
2322 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2323 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2324 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2325 }
2326
2327 if (fFlags & CPUM_CHANGED_LDTR)
2328 {
2329 if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2330 {
2331 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel;
2332 pVM->rem.s.Env.ldt.newselector = 0;
2333 pVM->rem.s.Env.ldt.fVBoxFlags = pCtx->ldtr.fFlags;
2334 pVM->rem.s.Env.ldt.base = pCtx->ldtr.u64Base;
2335 pVM->rem.s.Env.ldt.limit = pCtx->ldtr.u32Limit;
2336 pVM->rem.s.Env.ldt.flags = (pCtx->ldtr.Attr.u << 8) & 0xFFFFFF;
2337 }
2338 else
2339 {
2340 AssertFailed(); /* Shouldn't happen, see cpumR3LoadExec. */
2341 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
2342 }
2343 }
2344
2345 if (fFlags & CPUM_CHANGED_CPUID)
2346 {
2347 uint32_t u32Dummy;
2348
2349 /*
2350 * Get the CPUID features.
2351 */
2352 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2353 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2354 }
2355
2356 /* Sync FPU state after CR4, CPUID and EFER (!). */
2357 if (fFlags & CPUM_CHANGED_FPU_REM)
2358 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2359 }
2360
2361 /*
2362 * Sync TR unconditionally to make life simpler.
2363 */
2364 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel;
2365 pVM->rem.s.Env.tr.newselector = 0;
2366 pVM->rem.s.Env.tr.fVBoxFlags = pCtx->tr.fFlags;
2367 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base;
2368 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit;
2369 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u << 8) & 0xFFFFFF;
2370 /* Note! do_interrupt will fault if the busy flag is still set... */
2371 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2372
2373 /*
2374 * Update selector registers.
2375 *
2376 * This must be done *after* we've synced gdt, ldt and crX registers
2377 * since we're reading the GDT/LDT om sync_seg. This will happen with
2378 * saved state which takes a quick dip into rawmode for instance.
2379 *
2380 * CPL/Stack; Note first check this one as the CPL might have changed.
2381 * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
2382 */
2383 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2384 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2385#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
2386 do \
2387 { \
2388 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
2389 { \
2390 cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
2391 (a_pVBoxSReg)->Sel, \
2392 (a_pVBoxSReg)->u64Base, \
2393 (a_pVBoxSReg)->u32Limit, \
2394 ((a_pVBoxSReg)->Attr.u << 8) & 0xFFFFFF); \
2395 (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
2396 } \
2397 /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \
2398 else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
2399 { \
2400 Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
2401 (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
2402 sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
2403 if ((a_pRemSReg)->newselector) \
2404 STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
2405 } \
2406 else \
2407 (a_pRemSReg)->newselector = 0; \
2408 } while (0)
2409
2410 SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
2411 SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
2412 SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
2413 SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
2414 SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
2415 SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
2416 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2417 * be the same but not the base/limit. */
2418
2419 /*
2420 * Check for traps.
2421 */
2422 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2423 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2424 if (RT_SUCCESS(rc))
2425 {
2426#ifdef DEBUG
2427 if (u8TrapNo == 0x80)
2428 {
2429 remR3DumpLnxSyscall(pVCpu);
2430 remR3DumpOBsdSyscall(pVCpu);
2431 }
2432#endif
2433
2434 pVM->rem.s.Env.exception_index = u8TrapNo;
2435 if (enmType != TRPM_SOFTWARE_INT)
2436 {
2437 pVM->rem.s.Env.exception_is_int = 0;
2438 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2439 }
2440 else
2441 {
2442 /*
2443 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2444 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2445 * for int03 and into.
2446 */
2447 pVM->rem.s.Env.exception_is_int = 1;
2448 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2449 /* int 3 may be generated by one-byte 0xcc */
2450 if (u8TrapNo == 3)
2451 {
2452 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2453 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2454 }
2455 /* int 4 may be generated by one-byte 0xce */
2456 else if (u8TrapNo == 4)
2457 {
2458 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2459 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2460 }
2461 }
2462
2463 /* get error code and cr2 if needed. */
2464 if (enmType == TRPM_TRAP)
2465 {
2466 switch (u8TrapNo)
2467 {
2468 case 0x0e:
2469 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2470 /* fallthru */
2471 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2472 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2473 break;
2474
2475 case 0x11: case 0x08:
2476 default:
2477 pVM->rem.s.Env.error_code = 0;
2478 break;
2479 }
2480 }
2481 else
2482 pVM->rem.s.Env.error_code = 0;
2483
2484 /*
2485 * We can now reset the active trap since the recompiler is gonna have a go at it.
2486 */
2487 rc = TRPMResetTrap(pVCpu);
2488 AssertRC(rc);
2489 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2490 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2491 }
2492
2493 /*
2494 * Clear old interrupt request flags; Check for pending hardware interrupts.
2495 * (See @remark for why we don't check for other FFs.)
2496 */
2497 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2498 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2499 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2500 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2501
2502 /*
2503 * We're now in REM mode.
2504 */
2505 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2506 pVM->rem.s.fInREM = true;
2507 pVM->rem.s.fInStateSync = false;
2508 pVM->rem.s.cCanExecuteRaw = 0;
2509 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2510 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2511 return VINF_SUCCESS;
2512}
2513
2514
2515/**
2516 * Syncs back changes in the REM state to the the VM state.
2517 *
2518 * This must be called after invoking REMR3Run().
2519 * Calling it several times in a row is not permitted.
2520 *
2521 * @returns VBox status code.
2522 *
2523 * @param pVM VM Handle.
2524 * @param pVCpu VMCPU Handle.
2525 */
2526REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2527{
2528 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2529 Assert(pCtx);
2530 unsigned i;
2531
2532 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2533 Log2(("REMR3StateBack:\n"));
2534 Assert(pVM->rem.s.fInREM);
2535
2536 /*
2537 * Copy back the registers.
2538 * This is done in the order they are declared in the CPUMCTX structure.
2539 */
2540
2541 /** @todo FOP */
2542 /** @todo FPUIP */
2543 /** @todo CS */
2544 /** @todo FPUDP */
2545 /** @todo DS */
2546
2547 /** @todo check if FPU/XMM was actually used in the recompiler */
2548 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2549//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2550
2551#ifdef TARGET_X86_64
2552 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2553 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2554 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2555 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2556 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2557 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2558 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2559 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2560 pCtx->r8 = pVM->rem.s.Env.regs[8];
2561 pCtx->r9 = pVM->rem.s.Env.regs[9];
2562 pCtx->r10 = pVM->rem.s.Env.regs[10];
2563 pCtx->r11 = pVM->rem.s.Env.regs[11];
2564 pCtx->r12 = pVM->rem.s.Env.regs[12];
2565 pCtx->r13 = pVM->rem.s.Env.regs[13];
2566 pCtx->r14 = pVM->rem.s.Env.regs[14];
2567 pCtx->r15 = pVM->rem.s.Env.regs[15];
2568
2569 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2570
2571#else
2572 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2573 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2574 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2575 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2576 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2577 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2578 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2579
2580 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2581#endif
2582
2583#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2584 do \
2585 { \
2586 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2587 if (!pVM->rem.s.Env.segs[R_SS].newselector) \
2588 { \
2589 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2590 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2591 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2592 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2593 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */ \
2594 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> 8) & 0xF0FF; \
2595 } \
2596 else \
2597 { \
2598 pCtx->a_sreg.fFlags = 0; \
2599 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2600 } \
2601 } while (0)
2602
2603 SYNC_BACK_SREG(es, ES);
2604 SYNC_BACK_SREG(cs, CS);
2605 SYNC_BACK_SREG(ss, SS);
2606 SYNC_BACK_SREG(ds, DS);
2607 SYNC_BACK_SREG(fs, FS);
2608 SYNC_BACK_SREG(gs, GS);
2609
2610#ifdef TARGET_X86_64
2611 pCtx->rip = pVM->rem.s.Env.eip;
2612 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2613#else
2614 pCtx->eip = pVM->rem.s.Env.eip;
2615 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2616#endif
2617
2618 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2619 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2620 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2621 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2622 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2623 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2624
2625 for (i = 0; i < 8; i++)
2626 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2627
2628 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2629 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2630 {
2631 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2632 STAM_COUNTER_INC(&gStatREMGDTChange);
2633 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2634 }
2635
2636 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2637 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2638 {
2639 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2640 STAM_COUNTER_INC(&gStatREMIDTChange);
2641 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2642 }
2643
2644 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2645 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2646 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2647 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2648 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)
2649 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2650 )
2651 {
2652 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2653 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2654 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2655 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2656 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2657 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2658 STAM_COUNTER_INC(&gStatREMLDTRChange);
2659 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2660 }
2661
2662 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2663 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2664 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2665 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2666 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2667 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2668 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2669 : 0)
2670 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2671 )
2672 {
2673 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2674 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2675 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2676 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2677 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2678 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2679 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2680 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2681 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2682 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2683 if (pCtx->tr.Attr.u)
2684 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2685 STAM_COUNTER_INC(&gStatREMTRChange);
2686 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2687 }
2688
2689 /* Sysenter MSR */
2690 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2691 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2692 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2693
2694 /* System MSRs. */
2695 pCtx->msrEFER = pVM->rem.s.Env.efer;
2696 pCtx->msrSTAR = pVM->rem.s.Env.star;
2697 pCtx->msrPAT = pVM->rem.s.Env.pat;
2698#ifdef TARGET_X86_64
2699 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2700 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2701 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2702 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2703#endif
2704
2705 /* Inhibit interrupt flag. */
2706 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2707 {
2708 Log(("Settings VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2709 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2710 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2711 }
2712 else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2713 {
2714 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2715 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2716 }
2717
2718 remR3TrapClear(pVM);
2719
2720 /*
2721 * Check for traps.
2722 */
2723 if ( pVM->rem.s.Env.exception_index >= 0
2724 && pVM->rem.s.Env.exception_index < 256)
2725 {
2726 int rc;
2727
2728 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2729 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2730 AssertRC(rc);
2731 switch (pVM->rem.s.Env.exception_index)
2732 {
2733 case 0x0e:
2734 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2735 /* fallthru */
2736 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2737 case 0x11: case 0x08: /* 0 */
2738 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2739 break;
2740 }
2741
2742 }
2743
2744 /*
2745 * We're not longer in REM mode.
2746 */
2747 CPUMR3RemLeave(pVCpu,
2748 HWACCMIsEnabled(pVM)
2749 || ( pVM->rem.s.Env.segs[R_SS].newselector
2750 | pVM->rem.s.Env.segs[R_GS].newselector
2751 | pVM->rem.s.Env.segs[R_FS].newselector
2752 | pVM->rem.s.Env.segs[R_ES].newselector
2753 | pVM->rem.s.Env.segs[R_DS].newselector
2754 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2755 );
2756 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2757 pVM->rem.s.fInREM = false;
2758 pVM->rem.s.pCtx = NULL;
2759 pVM->rem.s.Env.pVCpu = NULL;
2760 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2761 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2762 return VINF_SUCCESS;
2763}
2764
2765
2766/**
2767 * This is called by the disassembler when it wants to update the cpu state
2768 * before for instance doing a register dump.
2769 */
static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    /* Only valid while the recompiler owns the CPU state (fInREM set). */
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR = 0;
    pCtx->fpu.MXCSR_MASK = 0;

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
////    dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* 64-bit target: copy back the full GPR set, including r8-r15. */
    pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8  = pVM->rem.s.Env.regs[8];
    pCtx->r9  = pVM->rem.s.Env.regs[9];
    pCtx->r10 = pVM->rem.s.Env.regs[10];
    pCtx->r11 = pVM->rem.s.Env.regs[11];
    pCtx->r12 = pVM->rem.s.Env.regs[12];
    pCtx->r13 = pVM->rem.s.Env.regs[13];
    pCtx->r14 = pVM->rem.s.Env.regs[14];
    pCtx->r15 = pVM->rem.s.Env.regs[15];

    pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
#else
    /* 32-bit target: only the eight classic GPRs exist. */
    pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
#endif

    /* Segment registers; SYNC_BACK_SREG copies selector + hidden parts. */
    SYNC_BACK_SREG(es, ES);
    SYNC_BACK_SREG(cs, CS);
    SYNC_BACK_SREG(ss, SS);
    SYNC_BACK_SREG(ds, DS);
    SYNC_BACK_SREG(fs, FS);
    SYNC_BACK_SREG(gs, GS);

#ifdef TARGET_X86_64
    pCtx->rip = pVM->rem.s.Env.eip;
    pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
#else
    pCtx->eip = pVM->rem.s.Env.eip;
    pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
#endif

    /* Control registers; a change of the CR4.VME bit forces a TSS resync. */
    pCtx->cr0 = pVM->rem.s.Env.cr[0];
    pCtx->cr2 = pVM->rem.s.Env.cr[2];
    pCtx->cr3 = pVM->rem.s.Env.cr[3];
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4 = pVM->rem.s.Env.cr[4];

    /* Debug registers. */
    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR; a base change requires SELM to resync the shadow GDT. */
    pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    /* IDTR; a base change requires TRPM to resync the shadow IDT. */
    pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* LDTR: write back only if anything differs or the cached copy was
       marked invalid; QEMU keeps attributes shifted left by 8. */
    if (    pCtx->ldtr.Sel      != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtr.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtr.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF)
        ||  !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
       )
    {
        pCtx->ldtr.Sel      = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
        pCtx->ldtr.u64Base  = pVM->rem.s.Env.ldt.base;
        pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtr.Attr.u   = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    /* TR: same pattern as LDTR, but the comparison normalizes the busy bit
       first (see note below) so a mere busy-bit mismatch doesn't trigger. */
    if (    pCtx->tr.Sel        != pVM->rem.s.Env.tr.selector
        ||  pCtx->tr.ValidSel   != pVM->rem.s.Env.tr.selector
        ||  pCtx->tr.u64Base    != pVM->rem.s.Env.tr.base
        ||  pCtx->tr.u32Limit   != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->tr.Attr.u     != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                    ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                    : 0)
        ||  !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
       )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr.Sel        = pVM->rem.s.Env.tr.selector;
        pCtx->tr.ValidSel   = pVM->rem.s.Env.tr.selector;
        pCtx->tr.fFlags     = CPUMSELREG_FLAGS_VALID;
        pCtx->tr.u64Base    = pVM->rem.s.Env.tr.base;
        pCtx->tr.u32Limit   = pVM->rem.s.Env.tr.limit;
        pCtx->tr.Attr.u     = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Re-set the busy bit if the attributes are non-zero, since the real
           CPU always marks the loaded TSS descriptor busy. */
        if (pCtx->tr.Attr.u)
            pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /* Sysenter MSR */
    pCtx->SysEnter.cs  = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER = pVM->rem.s.Env.efer;
    pCtx->msrSTAR = pVM->rem.s.Env.star;
    pCtx->msrPAT  = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    /* Long-mode only MSRs. */
    pCtx->msrLSTAR        = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR        = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK       = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
#endif

}
2928
2929
2930/**
2931 * Update the VMM state information if we're currently in REM.
2932 *
2933 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2934 * we're currently executing in REM and the VMM state is invalid. This method will of
2935 * course check that we're executing in REM before syncing any data over to the VMM.
2936 *
2937 * @param pVM The VM handle.
2938 * @param pVCpu The VMCPU handle.
2939 */
2940REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2941{
2942 if (pVM->rem.s.fInREM)
2943 remR3StateUpdate(pVM, pVCpu);
2944}
2945
2946
2947#undef LOG_GROUP
2948#define LOG_GROUP LOG_GROUP_REM
2949
2950
2951/**
2952 * Notify the recompiler about Address Gate 20 state change.
2953 *
2954 * This notification is required since A20 gate changes are
2955 * initialized from a device driver and the VM might just as
2956 * well be in REM mode as in RAW mode.
2957 *
2958 * @param pVM VM handle.
2959 * @param pVCpu VMCPU handle.
2960 * @param fEnable True if the gate should be enabled.
2961 * False if the gate should be disabled.
2962 */
2963REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
2964{
2965 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2966 VM_ASSERT_EMT(pVM);
2967
2968 /** @todo SMP and the A20 gate... */
2969 if (pVM->rem.s.Env.pVCpu == pVCpu)
2970 {
2971 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2972 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2973 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2974 }
2975}
2976
2977
2978/**
2979 * Replays the handler notification changes
2980 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2981 *
2982 * @param pVM VM handle.
2983 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;    /* record counter, used only for sanity asserts */
#endif

        /* Lockless purging of pending notifications: atomically detach the
           whole pending LIFO by swapping in the empty-list sentinel. */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t                idxCur;
            Assert(--c >= 0);

            /* Dispatch to the worker matching the recorded notification kind. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             */
            /* Lockless LIFO push: retry the CAS until no other EMT raced us. */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
3101
3102
3103/**
3104 * Notify REM about changed code page.
3105 *
3106 * @returns VBox status code.
3107 * @param pVM VM handle.
3108 * @param pVCpu VMCPU handle.
3109 * @param pvCodePage Code page address
3110 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
    /* Note: a complete no-op (always VINF_SUCCESS) unless the build defines
       VBOX_REM_PROTECT_PAGES_FROM_SMC. */
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
    /* If the guest page couldn't be resolved there is nothing to invalidate;
       the failure is silently ignored. */
#endif
    return VINF_SUCCESS;
}
3141
3142
3143/**
3144 * Notification about a successful MMR3PhysRegister() call.
3145 *
3146 * @param pVM VM handle.
3147 * @param GCPhys The physical address the RAM.
3148 * @param cb Size of the memory.
3149 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3150 */
3151REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3152{
3153 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3154 VM_ASSERT_EMT(pVM);
3155
3156 /*
3157 * Validate input - we trust the caller.
3158 */
3159 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3160 Assert(cb);
3161 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3162 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3163
3164 /*
3165 * Base ram? Update GCPhysLastRam.
3166 */
3167 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3168 {
3169 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3170 {
3171 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3172 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3173 }
3174 }
3175
3176 /*
3177 * Register the ram.
3178 */
3179 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3180
3181 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3182 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3183 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3184
3185 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3186}
3187
3188
3189/**
3190 * Notification about a successful MMR3PhysRomRegister() call.
3191 *
3192 * @param pVM VM handle.
3193 * @param GCPhys The physical address of the ROM.
3194 * @param cb The size of the ROM.
3195 * @param pvCopy Pointer to the ROM copy.
3196 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3197 * This function will be called when ever the protection of the
3198 * shadow ROM changes (at reset and end of POST).
3199 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /* NOTE(review): pvCopy is not used here - presumably kept for interface
       compatibility with MMR3PhysRomRegister; confirm before removing. */

    /*
     * Register the rom.
     */
    /* Ignore REM's own notifications while re-registering the range. When the
       ROM is currently writable shadow ROM it is registered as plain RAM,
       otherwise the IO_MEM_ROM flag makes it read-only to the recompiler. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3223
3224
3225/**
3226 * Notification about a successful memory deregistration or reservation.
3227 *
3228 * @param pVM VM Handle.
3229 * @param GCPhys Start physical address.
3230 * @param cb The size of the range.
3231 */
REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory.
     */
    /* Mark the range unassigned in the recompiler's physical memory map while
       ignoring the notifications this generates for ourselves. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3255
3256
3257/**
3258 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3259 *
3260 * @param pVM VM Handle.
3261 * @param enmType Handler type.
3262 * @param GCPhys Handler range address.
3263 * @param cb Size of the handler range.
3264 * @param fHasHCHandler Set if the handler has a HC callback function.
3265 *
3266 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3267 * Handler memory type to memory which has no HC handler.
3268 */
static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
          enmType, GCPhys, cb, fHasHCHandler));

    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);


    /* Ignore REM's own notifications while re-registering the range. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /* MMIO ranges get the MMIO memory type; other handler types only get the
       handler memory type when there actually is an HC callback, otherwise
       the existing registration is left untouched. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
    else if (fHasHCHandler)
        cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3290
3291/**
3292 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3293 *
3294 * @param pVM VM Handle.
3295 * @param enmType Handler type.
3296 * @param GCPhys Handler range address.
3297 * @param cb Size of the handler range.
3298 * @param fHasHCHandler Set if the handler has a HC callback function.
3299 *
3300 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3301 * Handler memory type to memory which has no HC handler.
3302 */
3303REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3304{
3305 REMR3ReplayHandlerNotifications(pVM);
3306
3307 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3308}
3309
3310/**
3311 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3312 *
3313 * @param pVM VM Handle.
3314 * @param enmType Handler type.
3315 * @param GCPhys Handler range address.
3316 * @param cb Size of the handler range.
3317 * @param fHasHCHandler Set if the handler has a HC callback function.
3318 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3319 */
static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);


    /* Ignore REM's own notifications while re-registering the range. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
    /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Not restoring as RAM: range must lie above the RAM area. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
        }
        else
        {
            /* Re-register the range as ordinary RAM again. */
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
        }
    }
    PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
3351
3352/**
3353 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3354 *
3355 * @param pVM VM Handle.
3356 * @param enmType Handler type.
3357 * @param GCPhys Handler range address.
3358 * @param cb Size of the handler range.
3359 * @param fHasHCHandler Set if the handler has a HC callback function.
3360 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3361 */
3362REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3363{
3364 REMR3ReplayHandlerNotifications(pVM);
3365 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3366}
3367
3368
3369/**
3370 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3371 *
3372 * @param pVM VM Handle.
3373 * @param enmType Handler type.
3374 * @param GCPhysOld Old handler range address.
3375 * @param GCPhysNew New handler range address.
3376 * @param cb Size of the handler range.
3377 * @param fHasHCHandler Set if the handler has a HC callback function.
3378 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3379 */
static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
          enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
    VM_ASSERT_EMT(pVM);
    /* Moving an MMIO range is not supported by this path. */
    AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));

    /* Nothing to do unless the handler has an HC callback registered. */
    if (fHasHCHandler)
    {
        ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

        /*
         * Reset the old page.
         */
        PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
        if (!fRestoreAsRAM)
            cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
        else
        {
            /* This is not perfect, but it'll do for PD monitoring... */
            Assert(cb == PAGE_SIZE);
            Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
            cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
        }

        /*
         * Update the new page.
         */
        Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
        Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
        cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
        PDMCritSectLeave(&pVM->rem.s.CritSectRegister);

        ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    }
}
3416
3417/**
3418 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3419 *
3420 * @param pVM VM Handle.
3421 * @param enmType Handler type.
3422 * @param GCPhysOld Old handler range address.
3423 * @param GCPhysNew New handler range address.
3424 * @param cb Size of the handler range.
3425 * @param fHasHCHandler Set if the handler has a HC callback function.
3426 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3427 */
3428REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3429{
3430 REMR3ReplayHandlerNotifications(pVM);
3431
3432 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3433}
3434
3435/**
3436 * Checks if we're handling access to this page or not.
3437 *
3438 * @returns true if we're trapping access.
3439 * @returns false if we aren't.
3440 * @param pVM The VM handle.
3441 * @param GCPhys The physical address.
3442 *
3443 * @remark This function will only work correctly in VBOX_STRICT builds!
3444 */
3445REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3446{
3447#ifdef VBOX_STRICT
3448 unsigned long off;
3449 REMR3ReplayHandlerNotifications(pVM);
3450
3451 off = get_phys_page_offset(GCPhys);
3452 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3453 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3454 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3455#else
3456 return false;
3457#endif
3458}
3459
3460
3461/**
3462 * Deals with a rare case in get_phys_addr_code where the code
3463 * is being monitored.
3464 *
3465 * It could also be an MMIO page, in which case we will raise a fatal error.
3466 *
3467 * @returns The physical address corresponding to addr.
3468 * @param env The cpu environment.
3469 * @param addr The virtual address.
3470 * @param pTLBEntry The TLB entry.
3471 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
                                             target_ulong addr,
                                             CPUTLBEntry *pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored code page: executing from MMIO (or similar) is fatal.
       Dump the handler/mmio/phys info to the release log and abort. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    /* cpu_abort does not return; this silences the missing-return warning. */
    AssertFatalFailed();
}
3499
3500/**
3501 * Read guest RAM and ROM.
3502 *
3503 * @param SrcGCPhys The source address (guest physical).
3504 * @param pvDst The destination address.
3505 * @param cb Number of bytes
3506 */
3507void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3508{
3509 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3510 VBOX_CHECK_ADDR(SrcGCPhys);
3511 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3512#ifdef VBOX_DEBUG_PHYS
3513 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3514#endif
3515 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3516}
3517
3518
3519/**
3520 * Read guest RAM and ROM, unsigned 8-bit.
3521 *
3522 * @param SrcGCPhys The source address (guest physical).
3523 */
3524RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3525{
3526 uint8_t val;
3527 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3528 VBOX_CHECK_ADDR(SrcGCPhys);
3529 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3530 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3531#ifdef VBOX_DEBUG_PHYS
3532 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3533#endif
3534 return val;
3535}
3536
3537
3538/**
3539 * Read guest RAM and ROM, signed 8-bit.
3540 *
3541 * @param SrcGCPhys The source address (guest physical).
3542 */
3543RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3544{
3545 int8_t val;
3546 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3547 VBOX_CHECK_ADDR(SrcGCPhys);
3548 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3549 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3550#ifdef VBOX_DEBUG_PHYS
3551 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3552#endif
3553 return val;
3554}
3555
3556
3557/**
3558 * Read guest RAM and ROM, unsigned 16-bit.
3559 *
3560 * @param SrcGCPhys The source address (guest physical).
3561 */
3562RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3563{
3564 uint16_t val;
3565 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3566 VBOX_CHECK_ADDR(SrcGCPhys);
3567 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3568 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3569#ifdef VBOX_DEBUG_PHYS
3570 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3571#endif
3572 return val;
3573}
3574
3575
3576/**
3577 * Read guest RAM and ROM, signed 16-bit.
3578 *
3579 * @param SrcGCPhys The source address (guest physical).
3580 */
3581RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3582{
3583 int16_t val;
3584 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3585 VBOX_CHECK_ADDR(SrcGCPhys);
3586 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3587 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3588#ifdef VBOX_DEBUG_PHYS
3589 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3590#endif
3591 return val;
3592}
3593
3594
3595/**
3596 * Read guest RAM and ROM, unsigned 32-bit.
3597 *
3598 * @param SrcGCPhys The source address (guest physical).
3599 */
3600RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3601{
3602 uint32_t val;
3603 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3604 VBOX_CHECK_ADDR(SrcGCPhys);
3605 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3606 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3607#ifdef VBOX_DEBUG_PHYS
3608 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3609#endif
3610 return val;
3611}
3612
3613
3614/**
3615 * Read guest RAM and ROM, signed 32-bit.
3616 *
3617 * @param SrcGCPhys The source address (guest physical).
3618 */
3619RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3620{
3621 int32_t val;
3622 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3623 VBOX_CHECK_ADDR(SrcGCPhys);
3624 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3625 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3626#ifdef VBOX_DEBUG_PHYS
3627 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3628#endif
3629 return val;
3630}
3631
3632
3633/**
3634 * Read guest RAM and ROM, unsigned 64-bit.
3635 *
3636 * @param SrcGCPhys The source address (guest physical).
3637 */
3638uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3639{
3640 uint64_t val;
3641 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3642 VBOX_CHECK_ADDR(SrcGCPhys);
3643 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3644 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3645#ifdef VBOX_DEBUG_PHYS
3646 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3647#endif
3648 return val;
3649}
3650
3651
3652/**
3653 * Read guest RAM and ROM, signed 64-bit.
3654 *
3655 * @param SrcGCPhys The source address (guest physical).
3656 */
3657int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3658{
3659 int64_t val;
3660 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3661 VBOX_CHECK_ADDR(SrcGCPhys);
3662 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3663 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3664#ifdef VBOX_DEBUG_PHYS
3665 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3666#endif
3667 return val;
3668}
3669
3670
3671/**
3672 * Write guest RAM.
3673 *
3674 * @param DstGCPhys The destination address (guest physical).
3675 * @param pvSrc The source address.
3676 * @param cb Number of bytes to write
3677 */
3678void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3679{
3680 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3681 VBOX_CHECK_ADDR(DstGCPhys);
3682 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3683 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3684#ifdef VBOX_DEBUG_PHYS
3685 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3686#endif
3687}
3688
3689
3690/**
3691 * Write guest RAM, unsigned 8-bit.
3692 *
3693 * @param DstGCPhys The destination address (guest physical).
3694 * @param val Value
3695 */
3696void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3697{
3698 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3699 VBOX_CHECK_ADDR(DstGCPhys);
3700 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3701 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3702#ifdef VBOX_DEBUG_PHYS
3703 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3704#endif
3705}
3706
3707
3708/**
 * Write guest RAM, unsigned 16-bit.
3710 *
3711 * @param DstGCPhys The destination address (guest physical).
3712 * @param val Value
3713 */
3714void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3715{
3716 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3717 VBOX_CHECK_ADDR(DstGCPhys);
3718 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3719 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3720#ifdef VBOX_DEBUG_PHYS
3721 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3722#endif
3723}
3724
3725
3726/**
3727 * Write guest RAM, unsigned 32-bit.
3728 *
3729 * @param DstGCPhys The destination address (guest physical).
3730 * @param val Value
3731 */
3732void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3733{
3734 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3735 VBOX_CHECK_ADDR(DstGCPhys);
3736 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3737 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3738#ifdef VBOX_DEBUG_PHYS
3739 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3740#endif
3741}
3742
3743
3744/**
3745 * Write guest RAM, unsigned 64-bit.
3746 *
3747 * @param DstGCPhys The destination address (guest physical).
3748 * @param val Value
3749 */
3750void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3751{
3752 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3753 VBOX_CHECK_ADDR(DstGCPhys);
3754 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3755 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3756#ifdef VBOX_DEBUG_PHYS
3757 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3758#endif
3759}
3760
3761#undef LOG_GROUP
3762#define LOG_GROUP LOG_GROUP_REM_MMIO
3763
3764/** Read MMIO memory. */
3765static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3766{
3767 uint32_t u32 = 0;
3768 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3769 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3770 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3771 return u32;
3772}
3773
3774/** Read MMIO memory. */
3775static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3776{
3777 uint32_t u32 = 0;
3778 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3779 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3780 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3781 return u32;
3782}
3783
3784/** Read MMIO memory. */
3785static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3786{
3787 uint32_t u32 = 0;
3788 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3789 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3790 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3791 return u32;
3792}
3793
3794/** Write to MMIO memory. */
3795static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3796{
3797 int rc;
3798 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3799 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3800 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3801}
3802
3803/** Write to MMIO memory. */
3804static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3805{
3806 int rc;
3807 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3808 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3809 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3810}
3811
3812/** Write to MMIO memory. */
3813static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3814{
3815 int rc;
3816 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3817 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3818 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3819}
3820
3821
3822#undef LOG_GROUP
3823#define LOG_GROUP LOG_GROUP_REM_HANDLER
3824
3825/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3826
3827static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3828{
3829 uint8_t u8;
3830 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3831 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3832 return u8;
3833}
3834
3835static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3836{
3837 uint16_t u16;
3838 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3839 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3840 return u16;
3841}
3842
3843static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3844{
3845 uint32_t u32;
3846 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3847 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3848 return u32;
3849}
3850
3851static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3852{
3853 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3854 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3855}
3856
3857static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3858{
3859 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3860 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3861}
3862
3863static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3864{
3865 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3866 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3867}
3868
3869/* -+- disassembly -+- */
3870
3871#undef LOG_GROUP
3872#define LOG_GROUP LOG_GROUP_REM_DISAS
3873
3874
3875/**
3876 * Enables or disables singled stepped disassembly.
3877 *
3878 * @returns VBox status code.
3879 * @param pVM VM handle.
3880 * @param fEnable To enable set this flag, to disable clear it.
3881 */
3882static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3883{
3884 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3885 VM_ASSERT_EMT(pVM);
3886
3887 if (fEnable)
3888 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3889 else
3890 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3891#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3892 cpu_single_step(&pVM->rem.s.Env, fEnable);
3893#endif
3894 return VINF_SUCCESS;
3895}
3896
3897
3898/**
3899 * Enables or disables singled stepped disassembly.
3900 *
3901 * @returns VBox status code.
3902 * @param pVM VM handle.
3903 * @param fEnable To enable set this flag, to disable clear it.
3904 */
3905REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3906{
3907 int rc;
3908
3909 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3910 if (VM_IS_EMT(pVM))
3911 return remR3DisasEnableStepping(pVM, fEnable);
3912
3913 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3914 AssertRC(rc);
3915 return rc;
3916}
3917
3918
3919#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3920/**
3921 * External Debugger Command: .remstep [on|off|1|0]
3922 */
3923static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3924{
3925 int rc;
3926
3927 if (cArgs == 0)
3928 /*
3929 * Print the current status.
3930 */
3931 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3932 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3933 else
3934 {
3935 /*
3936 * Convert the argument and change the mode.
3937 */
3938 bool fEnable;
3939 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3940 if (RT_SUCCESS(rc))
3941 {
3942 rc = REMR3DisasEnableStepping(pVM, fEnable);
3943 if (RT_SUCCESS(rc))
3944 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3945 else
3946 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3947 }
3948 else
3949 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3950 }
3951 return rc;
3952}
3953#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3954
3955
3956/**
3957 * Disassembles one instruction and prints it to the log.
3958 *
3959 * @returns Success indicator.
3960 * @param env Pointer to the recompiler CPU structure.
3961 * @param f32BitCode Indicates that whether or not the code should
3962 * be disassembled as 16 or 32 bit. If -1 the CS
3963 * selector will be inspected.
3964 * @param pszPrefix
3965 */
3966bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3967{
3968 PVM pVM = env->pVM;
3969 const bool fLog = LogIsEnabled();
3970 const bool fLog2 = LogIs2Enabled();
3971 int rc = VINF_SUCCESS;
3972
3973 /*
3974 * Don't bother if there ain't any log output to do.
3975 */
3976 if (!fLog && !fLog2)
3977 return true;
3978
3979 /*
3980 * Update the state so DBGF reads the correct register values.
3981 */
3982 remR3StateUpdate(pVM, env->pVCpu);
3983
3984 /*
3985 * Log registers if requested.
3986 */
3987 if (fLog2)
3988 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3989
3990 /*
3991 * Disassemble to log.
3992 */
3993 if (fLog)
3994 {
3995 PVMCPU pVCpu = VMMGetCpu(pVM);
3996 char szBuf[256];
3997 szBuf[0] = '\0';
3998 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3999 pVCpu->idCpu,
4000 0, /* Sel */
4001 0, /* GCPtr */
4002 DBGF_DISAS_FLAGS_CURRENT_GUEST
4003 | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4004 szBuf,
4005 sizeof(szBuf),
4006 NULL);
4007 if (RT_FAILURE(rc))
4008 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4009 if (pszPrefix && *pszPrefix)
4010 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4011 else
4012 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4013 }
4014
4015 return RT_SUCCESS(rc);
4016}
4017
4018
4019/**
4020 * Disassemble recompiled code.
4021 *
4022 * @param phFileIgnored Ignored, logfile usually.
4023 * @param pvCode Pointer to the code block.
4024 * @param cb Size of the code block.
4025 */
4026void disas(FILE *phFile, void *pvCode, unsigned long cb)
4027{
4028 if (LogIs2Enabled())
4029 {
4030 unsigned off = 0;
4031 char szOutput[256];
4032 DISCPUSTATE Cpu;
4033#ifdef RT_ARCH_X86
4034 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
4035#else
4036 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
4037#endif
4038
4039 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4040 while (off < cb)
4041 {
4042 uint32_t cbInstr;
4043 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
4044 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
4045 if (RT_SUCCESS(rc))
4046 RTLogPrintf("%s", szOutput);
4047 else
4048 {
4049 RTLogPrintf("disas error %Rrc\n", rc);
4050 cbInstr = 1;
4051 }
4052 off += cbInstr;
4053 }
4054 }
4055}
4056
4057
4058/**
4059 * Disassemble guest code.
4060 *
4061 * @param phFileIgnored Ignored, logfile usually.
4062 * @param uCode The guest address of the code to disassemble. (flat?)
4063 * @param cb Number of bytes to disassemble.
4064 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4065 */
4066void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4067{
4068 if (LogIs2Enabled())
4069 {
4070 PVM pVM = cpu_single_env->pVM;
4071 PVMCPU pVCpu = cpu_single_env->pVCpu;
4072 RTSEL cs;
4073 RTGCUINTPTR eip;
4074
4075 Assert(pVCpu);
4076
4077 /*
4078 * Update the state so DBGF reads the correct register values (flags).
4079 */
4080 remR3StateUpdate(pVM, pVCpu);
4081
4082 /*
4083 * Do the disassembling.
4084 */
4085 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4086 cs = cpu_single_env->segs[R_CS].selector;
4087 eip = uCode - cpu_single_env->segs[R_CS].base;
4088 for (;;)
4089 {
4090 char szBuf[256];
4091 uint32_t cbInstr;
4092 int rc = DBGFR3DisasInstrEx(pVM,
4093 pVCpu->idCpu,
4094 cs,
4095 eip,
4096 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4097 szBuf, sizeof(szBuf),
4098 &cbInstr);
4099 if (RT_SUCCESS(rc))
4100 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4101 else
4102 {
4103 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4104 cbInstr = 1;
4105 }
4106
4107 /* next */
4108 if (cb <= cbInstr)
4109 break;
4110 cb -= cbInstr;
4111 uCode += cbInstr;
4112 eip += cbInstr;
4113 }
4114 }
4115}
4116
4117
4118/**
4119 * Looks up a guest symbol.
4120 *
4121 * @returns Pointer to symbol name. This is a static buffer.
4122 * @param orig_addr The address in question.
4123 */
4124const char *lookup_symbol(target_ulong orig_addr)
4125{
4126 PVM pVM = cpu_single_env->pVM;
4127 RTGCINTPTR off = 0;
4128 RTDBGSYMBOL Sym;
4129 DBGFADDRESS Addr;
4130
4131 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4132 if (RT_SUCCESS(rc))
4133 {
4134 static char szSym[sizeof(Sym.szName) + 48];
4135 if (!off)
4136 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4137 else if (off > 0)
4138 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4139 else
4140 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4141 return szSym;
4142 }
4143 return "<N/A>";
4144}
4145
4146
4147#undef LOG_GROUP
4148#define LOG_GROUP LOG_GROUP_REM
4149
4150
4151/* -+- FF notifications -+- */
4152
4153
4154/**
4155 * Notification about a pending interrupt.
4156 *
4157 * @param pVM VM Handle.
4158 * @param pVCpu VMCPU Handle.
4159 * @param u8Interrupt Interrupt
4160 * @thread The emulation thread.
4161 */
4162REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4163{
4164 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4165 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4166}
4167
4168/**
4169 * Notification about a pending interrupt.
4170 *
4171 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4172 * @param pVM VM Handle.
4173 * @param pVCpu VMCPU Handle.
4174 * @thread The emulation thread.
4175 */
4176REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4177{
4178 return pVM->rem.s.u32PendingInterrupt;
4179}
4180
4181/**
4182 * Notification about the interrupt FF being set.
4183 *
4184 * @param pVM VM Handle.
4185 * @param pVCpu VMCPU Handle.
4186 * @thread The emulation thread.
4187 */
4188REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4189{
4190#ifndef IEM_VERIFICATION_MODE
4191 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4192 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4193 if (pVM->rem.s.fInREM)
4194 {
4195 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4196 CPU_INTERRUPT_EXTERNAL_HARD);
4197 }
4198#endif
4199}
4200
4201
4202/**
4203 * Notification about the interrupt FF being set.
4204 *
4205 * @param pVM VM Handle.
4206 * @param pVCpu VMCPU Handle.
4207 * @thread Any.
4208 */
4209REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4210{
4211 LogFlow(("REMR3NotifyInterruptClear:\n"));
4212 if (pVM->rem.s.fInREM)
4213 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4214}
4215
4216
4217/**
4218 * Notification about pending timer(s).
4219 *
4220 * @param pVM VM Handle.
4221 * @param pVCpuDst The target cpu for this notification.
4222 * TM will not broadcast pending timer events, but use
4223 * a dedicated EMT for them. So, only interrupt REM
4224 * execution if the given CPU is executing in REM.
4225 * @thread Any.
4226 */
4227REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4228{
4229#ifndef IEM_VERIFICATION_MODE
4230#ifndef DEBUG_bird
4231 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4232#endif
4233 if (pVM->rem.s.fInREM)
4234 {
4235 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4236 {
4237 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4238 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4239 CPU_INTERRUPT_EXTERNAL_TIMER);
4240 }
4241 else
4242 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4243 }
4244 else
4245 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4246#endif
4247}
4248
4249
4250/**
4251 * Notification about pending DMA transfers.
4252 *
4253 * @param pVM VM Handle.
4254 * @thread Any.
4255 */
4256REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4257{
4258#ifndef IEM_VERIFICATION_MODE
4259 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4260 if (pVM->rem.s.fInREM)
4261 {
4262 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4263 CPU_INTERRUPT_EXTERNAL_DMA);
4264 }
4265#endif
4266}
4267
4268
4269/**
4270 * Notification about pending timer(s).
4271 *
4272 * @param pVM VM Handle.
4273 * @thread Any.
4274 */
4275REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4276{
4277#ifndef IEM_VERIFICATION_MODE
4278 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4279 if (pVM->rem.s.fInREM)
4280 {
4281 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4282 CPU_INTERRUPT_EXTERNAL_EXIT);
4283 }
4284#endif
4285}
4286
4287
4288/**
4289 * Notification about pending FF set by an external thread.
4290 *
4291 * @param pVM VM handle.
4292 * @thread Any.
4293 */
4294REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4295{
4296#ifndef IEM_VERIFICATION_MODE
4297 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4298 if (pVM->rem.s.fInREM)
4299 {
4300 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4301 CPU_INTERRUPT_EXTERNAL_EXIT);
4302 }
4303#endif
4304}
4305
4306
4307#ifdef VBOX_WITH_STATISTICS
4308void remR3ProfileStart(int statcode)
4309{
4310 STAMPROFILEADV *pStat;
4311 switch(statcode)
4312 {
4313 case STATS_EMULATE_SINGLE_INSTR:
4314 pStat = &gStatExecuteSingleInstr;
4315 break;
4316 case STATS_QEMU_COMPILATION:
4317 pStat = &gStatCompilationQEmu;
4318 break;
4319 case STATS_QEMU_RUN_EMULATED_CODE:
4320 pStat = &gStatRunCodeQEmu;
4321 break;
4322 case STATS_QEMU_TOTAL:
4323 pStat = &gStatTotalTimeQEmu;
4324 break;
4325 case STATS_QEMU_RUN_TIMERS:
4326 pStat = &gStatTimers;
4327 break;
4328 case STATS_TLB_LOOKUP:
4329 pStat= &gStatTBLookup;
4330 break;
4331 case STATS_IRQ_HANDLING:
4332 pStat= &gStatIRQ;
4333 break;
4334 case STATS_RAW_CHECK:
4335 pStat = &gStatRawCheck;
4336 break;
4337
4338 default:
4339 AssertMsgFailed(("unknown stat %d\n", statcode));
4340 return;
4341 }
4342 STAM_PROFILE_ADV_START(pStat, a);
4343}
4344
4345
4346void remR3ProfileStop(int statcode)
4347{
4348 STAMPROFILEADV *pStat;
4349 switch(statcode)
4350 {
4351 case STATS_EMULATE_SINGLE_INSTR:
4352 pStat = &gStatExecuteSingleInstr;
4353 break;
4354 case STATS_QEMU_COMPILATION:
4355 pStat = &gStatCompilationQEmu;
4356 break;
4357 case STATS_QEMU_RUN_EMULATED_CODE:
4358 pStat = &gStatRunCodeQEmu;
4359 break;
4360 case STATS_QEMU_TOTAL:
4361 pStat = &gStatTotalTimeQEmu;
4362 break;
4363 case STATS_QEMU_RUN_TIMERS:
4364 pStat = &gStatTimers;
4365 break;
4366 case STATS_TLB_LOOKUP:
4367 pStat= &gStatTBLookup;
4368 break;
4369 case STATS_IRQ_HANDLING:
4370 pStat= &gStatIRQ;
4371 break;
4372 case STATS_RAW_CHECK:
4373 pStat = &gStatRawCheck;
4374 break;
4375 default:
4376 AssertMsgFailed(("unknown stat %d\n", statcode));
4377 return;
4378 }
4379 STAM_PROFILE_ADV_STOP(pStat, a);
4380}
4381#endif
4382
4383/**
4384 * Raise an RC, force rem exit.
4385 *
4386 * @param pVM VM handle.
4387 * @param rc The rc.
4388 */
4389void remR3RaiseRC(PVM pVM, int rc)
4390{
4391 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4392 Assert(pVM->rem.s.fInREM);
4393 VM_ASSERT_EMT(pVM);
4394 pVM->rem.s.rc = rc;
4395 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4396}
4397
4398
4399/* -+- timers -+- */
4400
4401uint64_t cpu_get_tsc(CPUX86State *env)
4402{
4403 STAM_COUNTER_INC(&gStatCpuGetTSC);
4404 return TMCpuTickGet(env->pVCpu);
4405}
4406
4407
4408/* -+- interrupts -+- */
4409
4410void cpu_set_ferr(CPUX86State *env)
4411{
4412 int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
4413 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4414}
4415
/**
 * Fetches the next pending interrupt vector for the recompiler.
 *
 * @returns The interrupt vector (0..255), or -1 if none could be obtained.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUX86State *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the latched vector; it is a one-shot hand-over from REMR3NotifyPendingInterrupt. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts may still be pending; keep the hard-interrupt request asserted. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4449
4450
4451/* -+- local apic -+- */
4452
4453#if 0 /* CPUMSetGuestMsr does this now. */
/* Disabled: the enclosing #if 0 notes CPUMSetGuestMsr now does this. Kept for reference only. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4459#endif
4460
4461uint64_t cpu_get_apic_base(CPUX86State *env)
4462{
4463 uint64_t u64;
4464 int rc = PDMApicGetBase(env->pVM, &u64);
4465 if (RT_SUCCESS(rc))
4466 {
4467 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4468 return u64;
4469 }
4470 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4471 return 0;
4472}
4473
4474void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4475{
4476 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4477 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4478}
4479
4480uint8_t cpu_get_apic_tpr(CPUX86State *env)
4481{
4482 uint8_t u8;
4483 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4484 if (RT_SUCCESS(rc))
4485 {
4486 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4487 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4488 }
4489 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4490 return 0;
4491}
4492
4493/**
4494 * Read an MSR.
4495 *
4496 * @retval 0 success.
4497 * @retval -1 failure, raise \#GP(0).
4498 * @param env The cpu state.
4499 * @param idMsr The MSR to read.
4500 * @param puValue Where to return the value.
4501 */
4502int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4503{
4504 Assert(env->pVCpu);
4505 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4506}
4507
4508/**
4509 * Write to an MSR.
4510 *
4511 * @retval 0 success.
4512 * @retval -1 failure, raise \#GP(0).
4513 * @param env The cpu state.
4514 * @param idMsr The MSR to read.
4515 * @param puValue Where to return the value.
4516 */
4517int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4518{
4519 Assert(env->pVCpu);
4520 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4521}
4522
4523/* -+- I/O Ports -+- */
4524
4525#undef LOG_GROUP
4526#define LOG_GROUP LOG_GROUP_REM_IOPORT
4527
4528void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4529{
4530 int rc;
4531
4532 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4533 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4534
4535 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4536 if (RT_LIKELY(rc == VINF_SUCCESS))
4537 return;
4538 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4539 {
4540 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4541 remR3RaiseRC(env->pVM, rc);
4542 return;
4543 }
4544 remAbort(rc, __FUNCTION__);
4545}
4546
4547void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4548{
4549 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4550 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4551 if (RT_LIKELY(rc == VINF_SUCCESS))
4552 return;
4553 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4554 {
4555 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4556 remR3RaiseRC(env->pVM, rc);
4557 return;
4558 }
4559 remAbort(rc, __FUNCTION__);
4560}
4561
4562void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4563{
4564 int rc;
4565 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4566 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4567 if (RT_LIKELY(rc == VINF_SUCCESS))
4568 return;
4569 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4570 {
4571 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4572 remR3RaiseRC(env->pVM, rc);
4573 return;
4574 }
4575 remAbort(rc, __FUNCTION__);
4576}
4577
4578uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4579{
4580 uint32_t u32 = 0;
4581 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4582 if (RT_LIKELY(rc == VINF_SUCCESS))
4583 {
4584 if (/*addr != 0x61 && */addr != 0x71)
4585 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4586 return (uint8_t)u32;
4587 }
4588 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4589 {
4590 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4591 remR3RaiseRC(env->pVM, rc);
4592 return (uint8_t)u32;
4593 }
4594 remAbort(rc, __FUNCTION__);
4595 return UINT8_C(0xff);
4596}
4597
4598uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4599{
4600 uint32_t u32 = 0;
4601 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4602 if (RT_LIKELY(rc == VINF_SUCCESS))
4603 {
4604 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4605 return (uint16_t)u32;
4606 }
4607 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4608 {
4609 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4610 remR3RaiseRC(env->pVM, rc);
4611 return (uint16_t)u32;
4612 }
4613 remAbort(rc, __FUNCTION__);
4614 return UINT16_C(0xffff);
4615}
4616
4617uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4618{
4619 uint32_t u32 = 0;
4620 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4621 if (RT_LIKELY(rc == VINF_SUCCESS))
4622 {
4623//if (addr==0x01f0 && u32 == 0x6b6d)
4624// loglevel = ~0;
4625 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4626 return u32;
4627 }
4628 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4629 {
4630 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4631 remR3RaiseRC(env->pVM, rc);
4632 return u32;
4633 }
4634 remAbort(rc, __FUNCTION__);
4635 return UINT32_C(0xffffffff);
4636}
4637
4638#undef LOG_GROUP
4639#define LOG_GROUP LOG_GROUP_REM
4640
4641
4642/* -+- helpers and misc other interfaces -+- */
4643
4644/**
4645 * Perform the CPUID instruction.
4646 *
4647 * @param env Pointer to the recompiler CPU structure.
4648 * @param idx The CPUID leaf (eax).
4649 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4650 * @param pvEAX Where to store eax.
4651 * @param pvEBX Where to store ebx.
4652 * @param pvECX Where to store ecx.
4653 * @param pvEDX Where to store edx.
4654 */
4655void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4656 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4657{
4658 NOREF(idxSub);
4659 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4660}
4661
4662
4663#if 0 /* not used */
4664/**
4665 * Interface for qemu hardware to report back fatal errors.
4666 */
4667void hw_error(const char *pszFormat, ...)
4668{
4669 /*
4670 * Bitch about it.
4671 */
4672 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4673 * this in my Odin32 tree at home! */
4674 va_list args;
4675 va_start(args, pszFormat);
4676 RTLogPrintf("fatal error in virtual hardware:");
4677 RTLogPrintfV(pszFormat, args);
4678 va_end(args);
4679 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4680
4681 /*
4682 * If we're in REM context we'll sync back the state before 'jumping' to
4683 * the EMs failure handling.
4684 */
4685 PVM pVM = cpu_single_env->pVM;
4686 if (pVM->rem.s.fInREM)
4687 REMR3StateBack(pVM);
4688 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4689 AssertMsgFailed(("EMR3FatalError returned!\n"));
4690}
4691#endif
4692
4693/**
4694 * Interface for the qemu cpu to report unhandled situation
4695 * raising a fatal VM error.
4696 */
4697void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4698{
4699 va_list va;
4700 PVM pVM;
4701 PVMCPU pVCpu;
4702 char szMsg[256];
4703
4704 /*
4705 * Bitch about it.
4706 */
4707 RTLogFlags(NULL, "nodisabled nobuffered");
4708 RTLogFlush(NULL);
4709
4710 va_start(va, pszFormat);
4711#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4712 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4713 unsigned cArgs = 0;
4714 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4715 const char *psz = strchr(pszFormat, '%');
4716 while (psz && cArgs < 6)
4717 {
4718 auArgs[cArgs++] = va_arg(va, uintptr_t);
4719 psz = strchr(psz + 1, '%');
4720 }
4721 switch (cArgs)
4722 {
4723 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4724 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4725 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4726 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4727 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4728 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4729 default:
4730 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4731 }
4732#else
4733 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4734#endif
4735 va_end(va);
4736
4737 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4738 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4739
4740 /*
4741 * If we're in REM context we'll sync back the state before 'jumping' to
4742 * the EMs failure handling.
4743 */
4744 pVM = cpu_single_env->pVM;
4745 pVCpu = cpu_single_env->pVCpu;
4746 Assert(pVCpu);
4747
4748 if (pVM->rem.s.fInREM)
4749 REMR3StateBack(pVM, pVCpu);
4750 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4751 AssertMsgFailed(("EMR3FatalError returned!\n"));
4752}
4753
4754
4755/**
4756 * Aborts the VM.
4757 *
4758 * @param rc VBox error code.
4759 * @param pszTip Hint about why/when this happened.
4760 */
4761void remAbort(int rc, const char *pszTip)
4762{
4763 PVM pVM;
4764 PVMCPU pVCpu;
4765
4766 /*
4767 * Bitch about it.
4768 */
4769 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4770 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4771
4772 /*
4773 * Jump back to where we entered the recompiler.
4774 */
4775 pVM = cpu_single_env->pVM;
4776 pVCpu = cpu_single_env->pVCpu;
4777 Assert(pVCpu);
4778
4779 if (pVM->rem.s.fInREM)
4780 REMR3StateBack(pVM, pVCpu);
4781
4782 EMR3FatalError(pVCpu, rc);
4783 AssertMsgFailed(("EMR3FatalError returned!\n"));
4784}
4785
4786
4787/**
4788 * Dumps a linux system call.
4789 * @param pVCpu VMCPU handle.
4790 */
4791void remR3DumpLnxSyscall(PVMCPU pVCpu)
4792{
4793 static const char *apsz[] =
4794 {
4795 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4796 "sys_exit",
4797 "sys_fork",
4798 "sys_read",
4799 "sys_write",
4800 "sys_open", /* 5 */
4801 "sys_close",
4802 "sys_waitpid",
4803 "sys_creat",
4804 "sys_link",
4805 "sys_unlink", /* 10 */
4806 "sys_execve",
4807 "sys_chdir",
4808 "sys_time",
4809 "sys_mknod",
4810 "sys_chmod", /* 15 */
4811 "sys_lchown16",
4812 "sys_ni_syscall", /* old break syscall holder */
4813 "sys_stat",
4814 "sys_lseek",
4815 "sys_getpid", /* 20 */
4816 "sys_mount",
4817 "sys_oldumount",
4818 "sys_setuid16",
4819 "sys_getuid16",
4820 "sys_stime", /* 25 */
4821 "sys_ptrace",
4822 "sys_alarm",
4823 "sys_fstat",
4824 "sys_pause",
4825 "sys_utime", /* 30 */
4826 "sys_ni_syscall", /* old stty syscall holder */
4827 "sys_ni_syscall", /* old gtty syscall holder */
4828 "sys_access",
4829 "sys_nice",
4830 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4831 "sys_sync",
4832 "sys_kill",
4833 "sys_rename",
4834 "sys_mkdir",
4835 "sys_rmdir", /* 40 */
4836 "sys_dup",
4837 "sys_pipe",
4838 "sys_times",
4839 "sys_ni_syscall", /* old prof syscall holder */
4840 "sys_brk", /* 45 */
4841 "sys_setgid16",
4842 "sys_getgid16",
4843 "sys_signal",
4844 "sys_geteuid16",
4845 "sys_getegid16", /* 50 */
4846 "sys_acct",
4847 "sys_umount", /* recycled never used phys() */
4848 "sys_ni_syscall", /* old lock syscall holder */
4849 "sys_ioctl",
4850 "sys_fcntl", /* 55 */
4851 "sys_ni_syscall", /* old mpx syscall holder */
4852 "sys_setpgid",
4853 "sys_ni_syscall", /* old ulimit syscall holder */
4854 "sys_olduname",
4855 "sys_umask", /* 60 */
4856 "sys_chroot",
4857 "sys_ustat",
4858 "sys_dup2",
4859 "sys_getppid",
4860 "sys_getpgrp", /* 65 */
4861 "sys_setsid",
4862 "sys_sigaction",
4863 "sys_sgetmask",
4864 "sys_ssetmask",
4865 "sys_setreuid16", /* 70 */
4866 "sys_setregid16",
4867 "sys_sigsuspend",
4868 "sys_sigpending",
4869 "sys_sethostname",
4870 "sys_setrlimit", /* 75 */
4871 "sys_old_getrlimit",
4872 "sys_getrusage",
4873 "sys_gettimeofday",
4874 "sys_settimeofday",
4875 "sys_getgroups16", /* 80 */
4876 "sys_setgroups16",
4877 "old_select",
4878 "sys_symlink",
4879 "sys_lstat",
4880 "sys_readlink", /* 85 */
4881 "sys_uselib",
4882 "sys_swapon",
4883 "sys_reboot",
4884 "old_readdir",
4885 "old_mmap", /* 90 */
4886 "sys_munmap",
4887 "sys_truncate",
4888 "sys_ftruncate",
4889 "sys_fchmod",
4890 "sys_fchown16", /* 95 */
4891 "sys_getpriority",
4892 "sys_setpriority",
4893 "sys_ni_syscall", /* old profil syscall holder */
4894 "sys_statfs",
4895 "sys_fstatfs", /* 100 */
4896 "sys_ioperm",
4897 "sys_socketcall",
4898 "sys_syslog",
4899 "sys_setitimer",
4900 "sys_getitimer", /* 105 */
4901 "sys_newstat",
4902 "sys_newlstat",
4903 "sys_newfstat",
4904 "sys_uname",
4905 "sys_iopl", /* 110 */
4906 "sys_vhangup",
4907 "sys_ni_syscall", /* old "idle" system call */
4908 "sys_vm86old",
4909 "sys_wait4",
4910 "sys_swapoff", /* 115 */
4911 "sys_sysinfo",
4912 "sys_ipc",
4913 "sys_fsync",
4914 "sys_sigreturn",
4915 "sys_clone", /* 120 */
4916 "sys_setdomainname",
4917 "sys_newuname",
4918 "sys_modify_ldt",
4919 "sys_adjtimex",
4920 "sys_mprotect", /* 125 */
4921 "sys_sigprocmask",
4922 "sys_ni_syscall", /* old "create_module" */
4923 "sys_init_module",
4924 "sys_delete_module",
4925 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4926 "sys_quotactl",
4927 "sys_getpgid",
4928 "sys_fchdir",
4929 "sys_bdflush",
4930 "sys_sysfs", /* 135 */
4931 "sys_personality",
4932 "sys_ni_syscall", /* reserved for afs_syscall */
4933 "sys_setfsuid16",
4934 "sys_setfsgid16",
4935 "sys_llseek", /* 140 */
4936 "sys_getdents",
4937 "sys_select",
4938 "sys_flock",
4939 "sys_msync",
4940 "sys_readv", /* 145 */
4941 "sys_writev",
4942 "sys_getsid",
4943 "sys_fdatasync",
4944 "sys_sysctl",
4945 "sys_mlock", /* 150 */
4946 "sys_munlock",
4947 "sys_mlockall",
4948 "sys_munlockall",
4949 "sys_sched_setparam",
4950 "sys_sched_getparam", /* 155 */
4951 "sys_sched_setscheduler",
4952 "sys_sched_getscheduler",
4953 "sys_sched_yield",
4954 "sys_sched_get_priority_max",
4955 "sys_sched_get_priority_min", /* 160 */
4956 "sys_sched_rr_get_interval",
4957 "sys_nanosleep",
4958 "sys_mremap",
4959 "sys_setresuid16",
4960 "sys_getresuid16", /* 165 */
4961 "sys_vm86",
4962 "sys_ni_syscall", /* Old sys_query_module */
4963 "sys_poll",
4964 "sys_nfsservctl",
4965 "sys_setresgid16", /* 170 */
4966 "sys_getresgid16",
4967 "sys_prctl",
4968 "sys_rt_sigreturn",
4969 "sys_rt_sigaction",
4970 "sys_rt_sigprocmask", /* 175 */
4971 "sys_rt_sigpending",
4972 "sys_rt_sigtimedwait",
4973 "sys_rt_sigqueueinfo",
4974 "sys_rt_sigsuspend",
4975 "sys_pread64", /* 180 */
4976 "sys_pwrite64",
4977 "sys_chown16",
4978 "sys_getcwd",
4979 "sys_capget",
4980 "sys_capset", /* 185 */
4981 "sys_sigaltstack",
4982 "sys_sendfile",
4983 "sys_ni_syscall", /* reserved for streams1 */
4984 "sys_ni_syscall", /* reserved for streams2 */
4985 "sys_vfork", /* 190 */
4986 "sys_getrlimit",
4987 "sys_mmap2",
4988 "sys_truncate64",
4989 "sys_ftruncate64",
4990 "sys_stat64", /* 195 */
4991 "sys_lstat64",
4992 "sys_fstat64",
4993 "sys_lchown",
4994 "sys_getuid",
4995 "sys_getgid", /* 200 */
4996 "sys_geteuid",
4997 "sys_getegid",
4998 "sys_setreuid",
4999 "sys_setregid",
5000 "sys_getgroups", /* 205 */
5001 "sys_setgroups",
5002 "sys_fchown",
5003 "sys_setresuid",
5004 "sys_getresuid",
5005 "sys_setresgid", /* 210 */
5006 "sys_getresgid",
5007 "sys_chown",
5008 "sys_setuid",
5009 "sys_setgid",
5010 "sys_setfsuid", /* 215 */
5011 "sys_setfsgid",
5012 "sys_pivot_root",
5013 "sys_mincore",
5014 "sys_madvise",
5015 "sys_getdents64", /* 220 */
5016 "sys_fcntl64",
5017 "sys_ni_syscall", /* reserved for TUX */
5018 "sys_ni_syscall",
5019 "sys_gettid",
5020 "sys_readahead", /* 225 */
5021 "sys_setxattr",
5022 "sys_lsetxattr",
5023 "sys_fsetxattr",
5024 "sys_getxattr",
5025 "sys_lgetxattr", /* 230 */
5026 "sys_fgetxattr",
5027 "sys_listxattr",
5028 "sys_llistxattr",
5029 "sys_flistxattr",
5030 "sys_removexattr", /* 235 */
5031 "sys_lremovexattr",
5032 "sys_fremovexattr",
5033 "sys_tkill",
5034 "sys_sendfile64",
5035 "sys_futex", /* 240 */
5036 "sys_sched_setaffinity",
5037 "sys_sched_getaffinity",
5038 "sys_set_thread_area",
5039 "sys_get_thread_area",
5040 "sys_io_setup", /* 245 */
5041 "sys_io_destroy",
5042 "sys_io_getevents",
5043 "sys_io_submit",
5044 "sys_io_cancel",
5045 "sys_fadvise64", /* 250 */
5046 "sys_ni_syscall",
5047 "sys_exit_group",
5048 "sys_lookup_dcookie",
5049 "sys_epoll_create",
5050 "sys_epoll_ctl", /* 255 */
5051 "sys_epoll_wait",
5052 "sys_remap_file_pages",
5053 "sys_set_tid_address",
5054 "sys_timer_create",
5055 "sys_timer_settime", /* 260 */
5056 "sys_timer_gettime",
5057 "sys_timer_getoverrun",
5058 "sys_timer_delete",
5059 "sys_clock_settime",
5060 "sys_clock_gettime", /* 265 */
5061 "sys_clock_getres",
5062 "sys_clock_nanosleep",
5063 "sys_statfs64",
5064 "sys_fstatfs64",
5065 "sys_tgkill", /* 270 */
5066 "sys_utimes",
5067 "sys_fadvise64_64",
5068 "sys_ni_syscall" /* sys_vserver */
5069 };
5070
5071 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5072 switch (uEAX)
5073 {
5074 default:
5075 if (uEAX < RT_ELEMENTS(apsz))
5076 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5077 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5078 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5079 else
5080 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5081 break;
5082
5083 }
5084}
5085
5086
5087/**
5088 * Dumps an OpenBSD system call.
5089 * @param pVCpu VMCPU handle.
5090 */
5091void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5092{
5093 static const char *apsz[] =
5094 {
5095 "SYS_syscall", //0
5096 "SYS_exit", //1
5097 "SYS_fork", //2
5098 "SYS_read", //3
5099 "SYS_write", //4
5100 "SYS_open", //5
5101 "SYS_close", //6
5102 "SYS_wait4", //7
5103 "SYS_8",
5104 "SYS_link", //9
5105 "SYS_unlink", //10
5106 "SYS_11",
5107 "SYS_chdir", //12
5108 "SYS_fchdir", //13
5109 "SYS_mknod", //14
5110 "SYS_chmod", //15
5111 "SYS_chown", //16
5112 "SYS_break", //17
5113 "SYS_18",
5114 "SYS_19",
5115 "SYS_getpid", //20
5116 "SYS_mount", //21
5117 "SYS_unmount", //22
5118 "SYS_setuid", //23
5119 "SYS_getuid", //24
5120 "SYS_geteuid", //25
5121 "SYS_ptrace", //26
5122 "SYS_recvmsg", //27
5123 "SYS_sendmsg", //28
5124 "SYS_recvfrom", //29
5125 "SYS_accept", //30
5126 "SYS_getpeername", //31
5127 "SYS_getsockname", //32
5128 "SYS_access", //33
5129 "SYS_chflags", //34
5130 "SYS_fchflags", //35
5131 "SYS_sync", //36
5132 "SYS_kill", //37
5133 "SYS_38",
5134 "SYS_getppid", //39
5135 "SYS_40",
5136 "SYS_dup", //41
5137 "SYS_opipe", //42
5138 "SYS_getegid", //43
5139 "SYS_profil", //44
5140 "SYS_ktrace", //45
5141 "SYS_sigaction", //46
5142 "SYS_getgid", //47
5143 "SYS_sigprocmask", //48
5144 "SYS_getlogin", //49
5145 "SYS_setlogin", //50
5146 "SYS_acct", //51
5147 "SYS_sigpending", //52
5148 "SYS_osigaltstack", //53
5149 "SYS_ioctl", //54
5150 "SYS_reboot", //55
5151 "SYS_revoke", //56
5152 "SYS_symlink", //57
5153 "SYS_readlink", //58
5154 "SYS_execve", //59
5155 "SYS_umask", //60
5156 "SYS_chroot", //61
5157 "SYS_62",
5158 "SYS_63",
5159 "SYS_64",
5160 "SYS_65",
5161 "SYS_vfork", //66
5162 "SYS_67",
5163 "SYS_68",
5164 "SYS_sbrk", //69
5165 "SYS_sstk", //70
5166 "SYS_61",
5167 "SYS_vadvise", //72
5168 "SYS_munmap", //73
5169 "SYS_mprotect", //74
5170 "SYS_madvise", //75
5171 "SYS_76",
5172 "SYS_77",
5173 "SYS_mincore", //78
5174 "SYS_getgroups", //79
5175 "SYS_setgroups", //80
5176 "SYS_getpgrp", //81
5177 "SYS_setpgid", //82
5178 "SYS_setitimer", //83
5179 "SYS_84",
5180 "SYS_85",
5181 "SYS_getitimer", //86
5182 "SYS_87",
5183 "SYS_88",
5184 "SYS_89",
5185 "SYS_dup2", //90
5186 "SYS_91",
5187 "SYS_fcntl", //92
5188 "SYS_select", //93
5189 "SYS_94",
5190 "SYS_fsync", //95
5191 "SYS_setpriority", //96
5192 "SYS_socket", //97
5193 "SYS_connect", //98
5194 "SYS_99",
5195 "SYS_getpriority", //100
5196 "SYS_101",
5197 "SYS_102",
5198 "SYS_sigreturn", //103
5199 "SYS_bind", //104
5200 "SYS_setsockopt", //105
5201 "SYS_listen", //106
5202 "SYS_107",
5203 "SYS_108",
5204 "SYS_109",
5205 "SYS_110",
5206 "SYS_sigsuspend", //111
5207 "SYS_112",
5208 "SYS_113",
5209 "SYS_114",
5210 "SYS_115",
5211 "SYS_gettimeofday", //116
5212 "SYS_getrusage", //117
5213 "SYS_getsockopt", //118
5214 "SYS_119",
5215 "SYS_readv", //120
5216 "SYS_writev", //121
5217 "SYS_settimeofday", //122
5218 "SYS_fchown", //123
5219 "SYS_fchmod", //124
5220 "SYS_125",
5221 "SYS_setreuid", //126
5222 "SYS_setregid", //127
5223 "SYS_rename", //128
5224 "SYS_129",
5225 "SYS_130",
5226 "SYS_flock", //131
5227 "SYS_mkfifo", //132
5228 "SYS_sendto", //133
5229 "SYS_shutdown", //134
5230 "SYS_socketpair", //135
5231 "SYS_mkdir", //136
5232 "SYS_rmdir", //137
5233 "SYS_utimes", //138
5234 "SYS_139",
5235 "SYS_adjtime", //140
5236 "SYS_141",
5237 "SYS_142",
5238 "SYS_143",
5239 "SYS_144",
5240 "SYS_145",
5241 "SYS_146",
5242 "SYS_setsid", //147
5243 "SYS_quotactl", //148
5244 "SYS_149",
5245 "SYS_150",
5246 "SYS_151",
5247 "SYS_152",
5248 "SYS_153",
5249 "SYS_154",
5250 "SYS_nfssvc", //155
5251 "SYS_156",
5252 "SYS_157",
5253 "SYS_158",
5254 "SYS_159",
5255 "SYS_160",
5256 "SYS_getfh", //161
5257 "SYS_162",
5258 "SYS_163",
5259 "SYS_164",
5260 "SYS_sysarch", //165
5261 "SYS_166",
5262 "SYS_167",
5263 "SYS_168",
5264 "SYS_169",
5265 "SYS_170",
5266 "SYS_171",
5267 "SYS_172",
5268 "SYS_pread", //173
5269 "SYS_pwrite", //174
5270 "SYS_175",
5271 "SYS_176",
5272 "SYS_177",
5273 "SYS_178",
5274 "SYS_179",
5275 "SYS_180",
5276 "SYS_setgid", //181
5277 "SYS_setegid", //182
5278 "SYS_seteuid", //183
5279 "SYS_lfs_bmapv", //184
5280 "SYS_lfs_markv", //185
5281 "SYS_lfs_segclean", //186
5282 "SYS_lfs_segwait", //187
5283 "SYS_188",
5284 "SYS_189",
5285 "SYS_190",
5286 "SYS_pathconf", //191
5287 "SYS_fpathconf", //192
5288 "SYS_swapctl", //193
5289 "SYS_getrlimit", //194
5290 "SYS_setrlimit", //195
5291 "SYS_getdirentries", //196
5292 "SYS_mmap", //197
5293 "SYS___syscall", //198
5294 "SYS_lseek", //199
5295 "SYS_truncate", //200
5296 "SYS_ftruncate", //201
5297 "SYS___sysctl", //202
5298 "SYS_mlock", //203
5299 "SYS_munlock", //204
5300 "SYS_205",
5301 "SYS_futimes", //206
5302 "SYS_getpgid", //207
5303 "SYS_xfspioctl", //208
5304 "SYS_209",
5305 "SYS_210",
5306 "SYS_211",
5307 "SYS_212",
5308 "SYS_213",
5309 "SYS_214",
5310 "SYS_215",
5311 "SYS_216",
5312 "SYS_217",
5313 "SYS_218",
5314 "SYS_219",
5315 "SYS_220",
5316 "SYS_semget", //221
5317 "SYS_222",
5318 "SYS_223",
5319 "SYS_224",
5320 "SYS_msgget", //225
5321 "SYS_msgsnd", //226
5322 "SYS_msgrcv", //227
5323 "SYS_shmat", //228
5324 "SYS_229",
5325 "SYS_shmdt", //230
5326 "SYS_231",
5327 "SYS_clock_gettime", //232
5328 "SYS_clock_settime", //233
5329 "SYS_clock_getres", //234
5330 "SYS_235",
5331 "SYS_236",
5332 "SYS_237",
5333 "SYS_238",
5334 "SYS_239",
5335 "SYS_nanosleep", //240
5336 "SYS_241",
5337 "SYS_242",
5338 "SYS_243",
5339 "SYS_244",
5340 "SYS_245",
5341 "SYS_246",
5342 "SYS_247",
5343 "SYS_248",
5344 "SYS_249",
5345 "SYS_minherit", //250
5346 "SYS_rfork", //251
5347 "SYS_poll", //252
5348 "SYS_issetugid", //253
5349 "SYS_lchown", //254
5350 "SYS_getsid", //255
5351 "SYS_msync", //256
5352 "SYS_257",
5353 "SYS_258",
5354 "SYS_259",
5355 "SYS_getfsstat", //260
5356 "SYS_statfs", //261
5357 "SYS_fstatfs", //262
5358 "SYS_pipe", //263
5359 "SYS_fhopen", //264
5360 "SYS_265",
5361 "SYS_fhstatfs", //266
5362 "SYS_preadv", //267
5363 "SYS_pwritev", //268
5364 "SYS_kqueue", //269
5365 "SYS_kevent", //270
5366 "SYS_mlockall", //271
5367 "SYS_munlockall", //272
5368 "SYS_getpeereid", //273
5369 "SYS_274",
5370 "SYS_275",
5371 "SYS_276",
5372 "SYS_277",
5373 "SYS_278",
5374 "SYS_279",
5375 "SYS_280",
5376 "SYS_getresuid", //281
5377 "SYS_setresuid", //282
5378 "SYS_getresgid", //283
5379 "SYS_setresgid", //284
5380 "SYS_285",
5381 "SYS_mquery", //286
5382 "SYS_closefrom", //287
5383 "SYS_sigaltstack", //288
5384 "SYS_shmget", //289
5385 "SYS_semop", //290
5386 "SYS_stat", //291
5387 "SYS_fstat", //292
5388 "SYS_lstat", //293
5389 "SYS_fhstat", //294
5390 "SYS___semctl", //295
5391 "SYS_shmctl", //296
5392 "SYS_msgctl", //297
5393 "SYS_MAXSYSCALL", //298
5394 //299
5395 //300
5396 };
5397 uint32_t uEAX;
5398 if (!LogIsEnabled())
5399 return;
5400 uEAX = CPUMGetGuestEAX(pVCpu);
5401 switch (uEAX)
5402 {
5403 default:
5404 if (uEAX < RT_ELEMENTS(apsz))
5405 {
5406 uint32_t au32Args[8] = {0};
5407 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5408 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5409 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5410 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5411 }
5412 else
5413 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5414 break;
5415 }
5416}
5417
5418
#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)

/**
 * The Dll main entry point (stub).
 *
 * Minimal CRT-startup replacement for the no-CRT Windows/x86 build; nothing
 * needs initializing here, so it just reports success.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}


/**
 * Minimal memcpy replacement for the no-CRT build.
 *
 * Plain byte-by-byte forward copy; regions must not overlap (standard memcpy
 * contract).
 *
 * @returns dst.
 * @param   dst     Where to copy to.
 * @param   src     Where to copy from.  Not modified.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src; /* const: 'src' is const void *, so the byte
                                   pointer must be const too (the original
                                   non-const init discarded the qualifier). */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}

#endif
5437
/**
 * QEMU callback for SMM state changes (stub).
 *
 * Intentionally empty: this REM glue has nothing to update here
 * (presumably SMM is handled elsewhere in VBox, if at all — TODO confirm).
 */
void cpu_smm_update(CPUX86State *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette