VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 38300

Last change on this file since 38300 was 38300, checked in by vboxsync, 13 years ago

REM,PGM: Fix A20 syncing between the VMM and the recompiler.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 179.5 KB
Line 
1/* $Id: VBoxRecompiler.c 38300 2011-08-03 11:58:41Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include <stdio.h> /* FILE */
24#include "osdep.h"
25#include "config.h"
26#include "cpu.h"
27#include "exec-all.h"
28#include "ioport.h"
29
30#include <VBox/vmm/rem.h>
31#include <VBox/vmm/vmapi.h>
32#include <VBox/vmm/tm.h>
33#include <VBox/vmm/ssm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/iom.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/pgm.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/dbgf.h>
41#include <VBox/dbg.h>
42#include <VBox/vmm/hwaccm.h>
43#include <VBox/vmm/patm.h>
44#include <VBox/vmm/csam.h>
45#include "REMInternal.h"
46#include <VBox/vmm/vm.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50#include <VBox/log.h>
51#include <iprt/semaphore.h>
52#include <iprt/asm.h>
53#include <iprt/assert.h>
54#include <iprt/thread.h>
55#include <iprt/string.h>
56
57/* Don't wanna include everything. */
58extern void cpu_exec_init_all(unsigned long tb_size);
59extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
60extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
61extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
62extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
63extern void tlb_flush(CPUX86State *env, int flush_global);
64extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
65extern void sync_ldtr(CPUX86State *env1, int selector);
66
67#ifdef VBOX_STRICT
68unsigned long get_phys_page_offset(target_ulong addr);
69#endif
70
71
72/*******************************************************************************
73* Defined Constants And Macros *
74*******************************************************************************/
75
76/** Copy 80-bit fpu register at pSrc to pDst.
77 * This is probably faster than *calling* memcpy.
78 */
79#define REM_COPY_FPU_REG(pDst, pSrc) \
80 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
81
82/** How remR3RunLoggingStep operates. */
83#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
116/** @todo Move stats to REM::s some rainy day we have nothing to do. */
117#ifdef VBOX_WITH_STATISTICS
118static STAMPROFILEADV gStatExecuteSingleInstr;
119static STAMPROFILEADV gStatCompilationQEmu;
120static STAMPROFILEADV gStatRunCodeQEmu;
121static STAMPROFILEADV gStatTotalTimeQEmu;
122static STAMPROFILEADV gStatTimers;
123static STAMPROFILEADV gStatTBLookup;
124static STAMPROFILEADV gStatIRQ;
125static STAMPROFILEADV gStatRawCheck;
126static STAMPROFILEADV gStatMemRead;
127static STAMPROFILEADV gStatMemWrite;
128static STAMPROFILE gStatGCPhys2HCVirt;
129static STAMPROFILE gStatHCVirt2GCPhys;
130static STAMCOUNTER gStatCpuGetTSC;
131static STAMCOUNTER gStatRefuseTFInhibit;
132static STAMCOUNTER gStatRefuseVM86;
133static STAMCOUNTER gStatRefusePaging;
134static STAMCOUNTER gStatRefusePAE;
135static STAMCOUNTER gStatRefuseIOPLNot0;
136static STAMCOUNTER gStatRefuseIF0;
137static STAMCOUNTER gStatRefuseCode16;
138static STAMCOUNTER gStatRefuseWP0;
139static STAMCOUNTER gStatRefuseRing1or2;
140static STAMCOUNTER gStatRefuseCanExecute;
141static STAMCOUNTER gStatREMGDTChange;
142static STAMCOUNTER gStatREMIDTChange;
143static STAMCOUNTER gStatREMLDTRChange;
144static STAMCOUNTER gStatREMTRChange;
145static STAMCOUNTER gStatSelOutOfSync[6];
146static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
147static STAMCOUNTER gStatFlushTBs;
148#endif
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
154/*
155 * Global stuff.
156 */
157
/** MMIO read callbacks.
 * Indexed by access size: [0]=byte, [1]=word, [2]=dword (see the U8/U16/U32
 * entries); passed to cpu_register_io_memory() by REMR3Init. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Same size indexing as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (access-handler backed memory).
 * Same size indexing as g_apfnMMIORead. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (access-handler backed memory).
 * Same size indexing as g_apfnMMIORead. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 *
 * Registered once (lazily) by REMR3Init via DBGCRegisterCommands().
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);

/** '.remstep' arguments.
 * A single optional number/mnemonic; ~0 as cTimesMax means "no upper limit". */
static const DBGCVARDESC    g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory,        fFlags, pszName,  pszDescription */
    {  0,         ~0,        DBGCVAR_CAT_NUMBER, 0,      "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors for the '.remstep' debugger command. */
static const DBGCCMD    g_aCmds[] =
{
    {
        .pszCmd         = "remstep",
        .cArgsMin       = 0,                        /* no argument: just report current state */
        .cArgsMax       = 1,
        .paArgDescs     = &g_aArgRemStep[0],
        .cArgDescs      = RT_ELEMENTS(g_aArgRemStep),
        .fFlags         = 0,
        .pfnHandler     = remR3CmdDisasEnableStepping,
        .pszSyntax      = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
221
222/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
223 * @todo huh??? That cannot be the case on the mac... So, this
224 * point is probably not valid any longer. */
225uint8_t *code_gen_prologue;
226
227
228/*******************************************************************************
229* Internal Functions *
230*******************************************************************************/
231void remAbort(int rc, const char *pszTip);
232extern int testmath(void);
233
234/* Put them here to avoid unused variable warning. */
235AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
236#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
237//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
238/* Why did this have to be identical?? */
239AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
240#else
241AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
242#endif
243
244
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment (cpu_x86_init + CPUID feature
 * mirroring), allocates the code generation prologue and single-instruction
 * code buffer, registers the MMIO/handler io-memory types, hooks up the
 * saved state unit, and (in enabled builds) registers debugger commands and
 * statistics.  Must run before any RAM is registered with PGM/MM — see the
 * MMR3PhysGetRamSize() assertion below.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    PREMHANDLERNOTIFICATION pCur;
    uint32_t u32Dummy;
    int rc;
    unsigned i;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity: the REM state must fit its padding in the VM structure
     * and the Env member must fit the size the VMM was compiled against.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());    /* testmath() presumably returns 0 on success -- TODO confirm */
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* Ignore all notifications while we bring the recompiler up. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* Prologue code must be executable (see the @todo on code_gen_prologue). */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler CPU and mirror the guest CPUID features into it.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    PVMCPU pVCpu = VMMGetCpu(pVM);
    /* Std leaf 1 and ext leaf 0x80000001 feature bits. */
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    EMRemLock(pVM);
    cpu_reset(&pVM->rem.s.Env);
    EMRemUnlock(pVM);

    /* Allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* Finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default. */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types (MMIO and handler-backed) with the recompiler.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* Stop ignoring notifications (balances the inc above). */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, NULL, NULL,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands - registered once per process; failure is
     * deliberately ignored (the inner rc shadows the outer on purpose).
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu,    STAMTYPE_PROFILE, "/PROF/REM/Compile",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu,        STAMTYPE_PROFILE, "/PROF/REM/Runcode",    STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu,      STAMTYPE_PROFILE, "/PROF/REM/Emulate",    STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers,             STAMTYPE_PROFILE, "/PROF/REM/Timers",     STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup,           STAMTYPE_PROFILE, "/PROF/REM/TBLookup",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ,                STAMTYPE_PROFILE, "/PROF/REM/IRQ",        STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck,           STAMTYPE_PROFILE, "/PROF/REM/RawCheck",   STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead,            STAMTYPE_PROFILE, "/PROF/REM/MemRead",    STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite,           STAMTYPE_PROFILE, "/PROF/REM/MemWrite",   STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys,      STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt,      STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion.");

    STAM_REG(pVM, &gStatCpuGetTSC,          STAMTYPE_COUNTER, "/REM/CpuGetTSC",       STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit,    STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86,         STAMTYPE_COUNTER, "/REM/Refuse/VM86",     STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging,       STAMTYPE_COUNTER, "/REM/Refuse/Paging",   STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE,          STAMTYPE_COUNTER, "/REM/Refuse/PAE",      STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0,     STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0,          STAMTYPE_COUNTER, "/REM/Refuse/IF0",      STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16,       STAMTYPE_COUNTER, "/REM/Refuse/Code16",   STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0,          STAMTYPE_COUNTER, "/REM/Refuse/WP0",      STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2,     STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute,   STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs,           STAMTYPE_COUNTER, "/REM/FlushTB",         STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange,       STAMTYPE_COUNTER, "/REM/Change/GDTBase",  STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange,      STAMTYPE_COUNTER, "/REM/Change/LDTR",     STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange,       STAMTYPE_COUNTER, "/REM/Change/IDTBase",  STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange,        STAMTYPE_COUNTER, "/REM/Change/TR",       STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5],    STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
    AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);

    /* These counters live in exec.c and are registered even in release builds. */
    STAM_REL_REG(pVM, &tb_flush_count,              STAMTYPE_U32_RESET, "/REM/TbFlushCount",     STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count,    STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count,             STAMTYPE_U32_RESET, "/REM/TlbFlushCount",    STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
#endif

    /*
     * Init the handler notification lists: empty pending list, all entries
     * chained onto the free list (idxNext links by array index).
     */
    pVM->rem.s.idxPendingList = UINT32_MAX;
    pVM->rem.s.idxFreeList    = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }
    /* pCur still points at the last array entry here; terminate the free list.
       (Relies on the array having at least one element.) */
    pCur->idxNext = UINT32_MAX; /* the last record. */

    return rc;
}
445
446
447/**
448 * Finalizes the REM initialization.
449 *
450 * This is called after all components, devices and drivers has
451 * been initialized. Its main purpose it to finish the RAM related
452 * initialization.
453 *
454 * @returns VBox status code.
455 *
456 * @param pVM The VM handle.
457 */
458REMR3DECL(int) REMR3InitFinalize(PVM pVM)
459{
460 int rc;
461
462 /*
463 * Ram size & dirty bit map.
464 */
465 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
466 pVM->rem.s.fGCPhysLastRamFixed = true;
467#ifdef RT_STRICT
468 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
469#else
470 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
471#endif
472 return rc;
473}
474
/**
 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
 *
 * The dirty map holds one byte per guest page.  When fGuarded is set, the
 * map is placed so that its last valid byte sits immediately below a
 * PROT_NONE region, making any overrun fault at once.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* Must run before any RAM blocks have been registered with the recompiler. */
    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

    /* GCPhysLastRam is an inclusive address; +1 gives the size and must not wrap. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);

    /* One dirty map byte per page; verify no bits were lost in the shift. */
    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Plain heap allocation, no guard region. */
        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Revoke access to everything beyond the page-aligned map so overruns trap. */
        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the map up so its last valid byte ends right at the guard boundary. */
        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
    }

    /* Initialize it to 0xff (presumably marking all pages dirty -- TODO confirm semantics). */
    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
    return rc;
}
531
532
533/**
534 * Terminates the REM.
535 *
536 * Termination means cleaning up and freeing all resources,
537 * the VM it self is at this point powered off or suspended.
538 *
539 * @returns VBox status code.
540 * @param pVM The VM to operate on.
541 */
542REMR3DECL(int) REMR3Term(PVM pVM)
543{
544#ifdef VBOX_WITH_STATISTICS
545 /*
546 * Statistics.
547 */
548 STAM_DEREG(pVM, &gStatExecuteSingleInstr);
549 STAM_DEREG(pVM, &gStatCompilationQEmu);
550 STAM_DEREG(pVM, &gStatRunCodeQEmu);
551 STAM_DEREG(pVM, &gStatTotalTimeQEmu);
552 STAM_DEREG(pVM, &gStatTimers);
553 STAM_DEREG(pVM, &gStatTBLookup);
554 STAM_DEREG(pVM, &gStatIRQ);
555 STAM_DEREG(pVM, &gStatRawCheck);
556 STAM_DEREG(pVM, &gStatMemRead);
557 STAM_DEREG(pVM, &gStatMemWrite);
558 STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
559 STAM_DEREG(pVM, &gStatGCPhys2HCVirt);
560
561 STAM_DEREG(pVM, &gStatCpuGetTSC);
562
563 STAM_DEREG(pVM, &gStatRefuseTFInhibit);
564 STAM_DEREG(pVM, &gStatRefuseVM86);
565 STAM_DEREG(pVM, &gStatRefusePaging);
566 STAM_DEREG(pVM, &gStatRefusePAE);
567 STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
568 STAM_DEREG(pVM, &gStatRefuseIF0);
569 STAM_DEREG(pVM, &gStatRefuseCode16);
570 STAM_DEREG(pVM, &gStatRefuseWP0);
571 STAM_DEREG(pVM, &gStatRefuseRing1or2);
572 STAM_DEREG(pVM, &gStatRefuseCanExecute);
573 STAM_DEREG(pVM, &gStatFlushTBs);
574
575 STAM_DEREG(pVM, &gStatREMGDTChange);
576 STAM_DEREG(pVM, &gStatREMLDTRChange);
577 STAM_DEREG(pVM, &gStatREMIDTChange);
578 STAM_DEREG(pVM, &gStatREMTRChange);
579
580 STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
581 STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
582 STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
583 STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
584 STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
585 STAM_DEREG(pVM, &gStatSelOutOfSync[5]);
586
587 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
588 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
589 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
590 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
591 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
592 STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);
593
594 STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
595#endif /* VBOX_WITH_STATISTICS */
596
597 STAM_REL_DEREG(pVM, &tb_flush_count);
598 STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
599 STAM_REL_DEREG(pVM, &tlb_flush_count);
600
601 return VINF_SUCCESS;
602}
603
604
605/**
606 * The VM is being reset.
607 *
608 * For the REM component this means to call the cpu_reset() and
609 * reinitialize some state variables.
610 *
611 * @param pVM VM handle.
612 */
613REMR3DECL(void) REMR3Reset(PVM pVM)
614{
615 EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */
616
617 /*
618 * Reset the REM cpu.
619 */
620 Assert(pVM->rem.s.cIgnoreAll == 0);
621 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
622 cpu_reset(&pVM->rem.s.Env);
623 pVM->rem.s.cInvalidatedPages = 0;
624 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
625 Assert(pVM->rem.s.cIgnoreAll == 0);
626
627 /* Clear raw ring 0 init state */
628 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
629
630 /* Flush the TBs the next time we execute code here. */
631 pVM->rem.s.fFlushTBs = true;
632
633 EMRemUnlock(pVM);
634}
635
636
637/**
638 * Execute state save operation.
639 *
640 * @returns VBox status code.
641 * @param pVM VM Handle.
642 * @param pSSM SSM operation handle.
643 */
644static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
645{
646 PREM pRem = &pVM->rem.s;
647
648 /*
649 * Save the required CPU Env bits.
650 * (Not much because we're never in REM when doing the save.)
651 */
652 LogFlow(("remR3Save:\n"));
653 Assert(!pRem->fInREM);
654 SSMR3PutU32(pSSM, pRem->Env.hflags);
655 SSMR3PutU32(pSSM, ~0); /* separator */
656
657 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
658 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
659 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
660
661 return SSMR3PutU32(pSSM, ~0); /* terminator */
662}
663
664
/**
 * Execute state load operation.
 *
 * Mirrors remR3Save's layout exactly: hflags, separator, raw-ring-0 flag,
 * pending interrupt, terminator - with extra (now redundant) fields for the
 * 1.6 saved state version.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   uVersion        Data layout version.
 * @param   uPass           The data pass.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;     /* read as a 32-bit quantity below */
    uint32_t u32Sep;
    uint32_t i;
    int rc;
    PREM pRem;

    LogFlow(("remR3Load:\n"));
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Validate version.
     */
    if (    uVersion != REM_SAVED_STATE_VERSION
        &&  uVersion != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];  /* NOTE(review): shadows the pVCpu above - benign but worth renaming. */
        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
790
791
792
793#undef LOG_GROUP
794#define LOG_GROUP LOG_GROUP_REM_RUN
795
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.  The original interrupt_request
     * is saved and restored at the bottom.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that has to be disabled before we start
     * stepping, otherwise we'd trap on it immediately.  fBp remembers whether a
     * breakpoint was actually removed so it can be re-inserted below.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Successful step: nudge the clock (resume+suspend) and report it. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the remaining qemu exit codes to VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status code was stashed in rem.s.rc; fetch and clear it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-insert the breakpoint we removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
880
881
882/**
883 * Set a breakpoint using the REM facilities.
884 *
885 * @returns VBox status code.
886 * @param pVM The VM handle.
887 * @param Address The breakpoint address.
888 * @thread The emulation thread.
889 */
890REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
891{
892 VM_ASSERT_EMT(pVM);
893 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
894 {
895 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
896 return VINF_SUCCESS;
897 }
898 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
899 return VERR_REM_NO_MORE_BP_SLOTS;
900}
901
902
903/**
904 * Clears a breakpoint set by REMR3BreakpointSet().
905 *
906 * @returns VBox status code.
907 * @param pVM The VM handle.
908 * @param Address The breakpoint address.
909 * @thread The emulation thread.
910 */
911REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
912{
913 VM_ASSERT_EMT(pVM);
914 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
915 {
916 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
917 return VINF_SUCCESS;
918 }
919 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
920 return VERR_REM_BP_NOT_FOUND;
921}
922
923
924/**
925 * Emulate an instruction.
926 *
927 * This function executes one instruction without letting anyone
928 * interrupt it. This is intended for being called while being in
929 * raw mode and thus will take care of all the state syncing between
930 * REM and the rest.
931 *
932 * @returns VBox status code.
933 * @param pVM VM handle.
934 * @param pVCpu VMCPU Handle.
935 */
936REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
937{
938 bool fFlushTBs;
939
940 int rc, rc2;
941 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
942
943 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
944 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
945 */
946 if (HWACCMIsEnabled(pVM))
947 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
948
949 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
950 fFlushTBs = pVM->rem.s.fFlushTBs;
951 pVM->rem.s.fFlushTBs = false;
952
953 /*
954 * Sync the state and enable single instruction / single stepping.
955 */
956 rc = REMR3State(pVM, pVCpu);
957 pVM->rem.s.fFlushTBs = fFlushTBs;
958 if (RT_SUCCESS(rc))
959 {
960 int interrupt_request = pVM->rem.s.Env.interrupt_request;
961 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
962#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
963 cpu_single_step(&pVM->rem.s.Env, 0);
964#endif
965 Assert(!pVM->rem.s.Env.singlestep_enabled);
966
967 /*
968 * Now we set the execute single instruction flag and enter the cpu_exec loop.
969 */
970 TMNotifyStartOfExecution(pVCpu);
971 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
972 rc = cpu_exec(&pVM->rem.s.Env);
973 TMNotifyEndOfExecution(pVCpu);
974 switch (rc)
975 {
976 /*
977 * Executed without anything out of the way happening.
978 */
979 case EXCP_SINGLE_INSTR:
980 rc = VINF_EM_RESCHEDULE;
981 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
982 break;
983
984 /*
985 * If we take a trap or start servicing a pending interrupt, we might end up here.
986 * (Timer thread or some other thread wishing EMT's attention.)
987 */
988 case EXCP_INTERRUPT:
989 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
990 rc = VINF_EM_RESCHEDULE;
991 break;
992
993 /*
994 * Single step, we assume!
995 * If there was a breakpoint there we're fucked now.
996 */
997 case EXCP_DEBUG:
998 if (pVM->rem.s.Env.watchpoint_hit)
999 {
1000 /** @todo deal with watchpoints */
1001 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1002 rc = VINF_EM_DBG_BREAKPOINT;
1003 }
1004 else
1005 {
1006 CPUBreakpoint *pBP;
1007 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1008 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1009 if (pBP->pc == GCPtrPC)
1010 break;
1011 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1012 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1013 }
1014 break;
1015
1016 /*
1017 * hlt instruction.
1018 */
1019 case EXCP_HLT:
1020 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1021 rc = VINF_EM_HALT;
1022 break;
1023
1024 /*
1025 * The VM has halted.
1026 */
1027 case EXCP_HALTED:
1028 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1029 rc = VINF_EM_HALT;
1030 break;
1031
1032 /*
1033 * Switch to RAW-mode.
1034 */
1035 case EXCP_EXECUTE_RAW:
1036 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1037 rc = VINF_EM_RESCHEDULE_RAW;
1038 break;
1039
1040 /*
1041 * Switch to hardware accelerated RAW-mode.
1042 */
1043 case EXCP_EXECUTE_HWACC:
1044 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1045 rc = VINF_EM_RESCHEDULE_HWACC;
1046 break;
1047
1048 /*
1049 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1050 */
1051 case EXCP_RC:
1052 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1053 rc = pVM->rem.s.rc;
1054 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1055 break;
1056
1057 /*
1058 * Figure out the rest when they arrive....
1059 */
1060 default:
1061 AssertMsgFailed(("rc=%d\n", rc));
1062 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1063 rc = VINF_EM_RESCHEDULE;
1064 break;
1065 }
1066
1067 /*
1068 * Switch back the state.
1069 */
1070 pVM->rem.s.Env.interrupt_request = interrupt_request;
1071 rc2 = REMR3StateBack(pVM, pVCpu);
1072 AssertRC(rc2);
1073 }
1074
1075 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1076 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1077 return rc;
1078}
1079
1080
1081/**
1082 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1083 *
1084 * @returns VBox status code.
1085 *
1086 * @param pVM The VM handle.
1087 * @param pVCpu The Virtual CPU handle.
1088 */
1089static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1090{
1091 int rc;
1092
1093 Assert(pVM->rem.s.fInREM);
1094#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1095 cpu_single_step(&pVM->rem.s.Env, 1);
1096#else
1097 Assert(!pVM->rem.s.Env.singlestep_enabled);
1098#endif
1099
1100 /*
1101 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1102 */
1103 for (;;)
1104 {
1105 char szBuf[256];
1106
1107 /*
1108 * Log the current registers state and instruction.
1109 */
1110 remR3StateUpdate(pVM, pVCpu);
1111 DBGFR3Info(pVM, "cpumguest", NULL, NULL);
1112 szBuf[0] = '\0';
1113 rc = DBGFR3DisasInstrEx(pVM,
1114 pVCpu->idCpu,
1115 0, /* Sel */
1116 0, /* GCPtr */
1117 DBGF_DISAS_FLAGS_CURRENT_GUEST
1118 | DBGF_DISAS_FLAGS_DEFAULT_MODE
1119 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
1120 szBuf,
1121 sizeof(szBuf),
1122 NULL);
1123 if (RT_FAILURE(rc))
1124 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1125 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1126
1127 /*
1128 * Execute the instruction.
1129 */
1130 TMNotifyStartOfExecution(pVCpu);
1131
1132 if ( pVM->rem.s.Env.exception_index < 0
1133 || pVM->rem.s.Env.exception_index > 256)
1134 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1135
1136#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1137 pVM->rem.s.Env.interrupt_request = 0;
1138#else
1139 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1140#endif
1141 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1142 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1143 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1144 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1145 pVM->rem.s.Env.interrupt_request,
1146 pVM->rem.s.Env.halted,
1147 pVM->rem.s.Env.exception_index
1148 );
1149
1150 rc = cpu_exec(&pVM->rem.s.Env);
1151
1152 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1153 pVM->rem.s.Env.interrupt_request,
1154 pVM->rem.s.Env.halted,
1155 pVM->rem.s.Env.exception_index
1156 );
1157
1158 TMNotifyEndOfExecution(pVCpu);
1159
1160 switch (rc)
1161 {
1162#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1163 /*
1164 * The normal exit.
1165 */
1166 case EXCP_SINGLE_INSTR:
1167 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1168 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1169 continue;
1170 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1171 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1172 rc = VINF_SUCCESS;
1173 break;
1174
1175#else
1176 /*
1177 * The normal exit, check for breakpoints at PC just to be sure.
1178 */
1179#endif
1180 case EXCP_DEBUG:
1181 if (pVM->rem.s.Env.watchpoint_hit)
1182 {
1183 /** @todo deal with watchpoints */
1184 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1185 rc = VINF_EM_DBG_BREAKPOINT;
1186 }
1187 else
1188 {
1189 CPUBreakpoint *pBP;
1190 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1191 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1192 if (pBP->pc == GCPtrPC)
1193 break;
1194 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1195 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1196 }
1197#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1198 if (rc == VINF_EM_DBG_STEPPED)
1199 {
1200 if ( !VM_FF_ISPENDING(pVM, VM_FF_ALL_REM_MASK)
1201 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1202 continue;
1203
1204 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1205 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1206 rc = VINF_SUCCESS;
1207 }
1208#endif
1209 break;
1210
1211 /*
1212 * If we take a trap or start servicing a pending interrupt, we might end up here.
1213 * (Timer thread or some other thread wishing EMT's attention.)
1214 */
1215 case EXCP_INTERRUPT:
1216 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1217 rc = VINF_SUCCESS;
1218 break;
1219
1220 /*
1221 * hlt instruction.
1222 */
1223 case EXCP_HLT:
1224 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1225 rc = VINF_EM_HALT;
1226 break;
1227
1228 /*
1229 * The VM has halted.
1230 */
1231 case EXCP_HALTED:
1232 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1233 rc = VINF_EM_HALT;
1234 break;
1235
1236 /*
1237 * Switch to RAW-mode.
1238 */
1239 case EXCP_EXECUTE_RAW:
1240 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1241 rc = VINF_EM_RESCHEDULE_RAW;
1242 break;
1243
1244 /*
1245 * Switch to hardware accelerated RAW-mode.
1246 */
1247 case EXCP_EXECUTE_HWACC:
1248 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HWACC rc=VINF_EM_RESCHEDULE_HWACC\n");
1249 rc = VINF_EM_RESCHEDULE_HWACC;
1250 break;
1251
1252 /*
1253 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1254 */
1255 case EXCP_RC:
1256 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1257 rc = pVM->rem.s.rc;
1258 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1259 break;
1260
1261 /*
1262 * Figure out the rest when they arrive....
1263 */
1264 default:
1265 AssertMsgFailed(("rc=%d\n", rc));
1266 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1267 rc = VINF_EM_RESCHEDULE;
1268 break;
1269 }
1270 break;
1271 }
1272
1273#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1274// cpu_single_step(&pVM->rem.s.Env, 0);
1275#else
1276 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1277#endif
1278 return rc;
1279}
1280
1281
1282/**
1283 * Runs code in recompiled mode.
1284 *
1285 * Before calling this function the REM state needs to be in sync with
1286 * the VM. Call REMR3State() to perform the sync. It's only necessary
1287 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1288 * and after calling REMR3StateBack().
1289 *
1290 * @returns VBox status code.
1291 *
1292 * @param pVM VM Handle.
1293 * @param pVCpu VMCPU Handle.
1294 */
1295REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1296{
1297 int rc;
1298
1299 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1300 return remR3RunLoggingStep(pVM, pVCpu);
1301
1302 Assert(pVM->rem.s.fInREM);
1303 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1304
1305 TMNotifyStartOfExecution(pVCpu);
1306 rc = cpu_exec(&pVM->rem.s.Env);
1307 TMNotifyEndOfExecution(pVCpu);
1308 switch (rc)
1309 {
1310 /*
1311 * This happens when the execution was interrupted
1312 * by an external event, like pending timers.
1313 */
1314 case EXCP_INTERRUPT:
1315 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1316 rc = VINF_SUCCESS;
1317 break;
1318
1319 /*
1320 * hlt instruction.
1321 */
1322 case EXCP_HLT:
1323 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1324 rc = VINF_EM_HALT;
1325 break;
1326
1327 /*
1328 * The VM has halted.
1329 */
1330 case EXCP_HALTED:
1331 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1332 rc = VINF_EM_HALT;
1333 break;
1334
1335 /*
1336 * Breakpoint/single step.
1337 */
1338 case EXCP_DEBUG:
1339 if (pVM->rem.s.Env.watchpoint_hit)
1340 {
1341 /** @todo deal with watchpoints */
1342 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1343 rc = VINF_EM_DBG_BREAKPOINT;
1344 }
1345 else
1346 {
1347 CPUBreakpoint *pBP;
1348 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1349 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1350 if (pBP->pc == GCPtrPC)
1351 break;
1352 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1353 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1354 }
1355 break;
1356
1357 /*
1358 * Switch to RAW-mode.
1359 */
1360 case EXCP_EXECUTE_RAW:
1361 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1362 rc = VINF_EM_RESCHEDULE_RAW;
1363 break;
1364
1365 /*
1366 * Switch to hardware accelerated RAW-mode.
1367 */
1368 case EXCP_EXECUTE_HWACC:
1369 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1370 rc = VINF_EM_RESCHEDULE_HWACC;
1371 break;
1372
1373 /*
1374 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1375 */
1376 case EXCP_RC:
1377 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1378 rc = pVM->rem.s.rc;
1379 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1380 break;
1381
1382 /*
1383 * Figure out the rest when they arrive....
1384 */
1385 default:
1386 AssertMsgFailed(("rc=%d\n", rc));
1387 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1388 rc = VINF_SUCCESS;
1389 break;
1390 }
1391
1392 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1393 return rc;
1394}
1395
1396
1397/**
1398 * Check if the cpu state is suitable for Raw execution.
1399 *
1400 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1401 *
1402 * @param env The CPU env struct.
1403 * @param eip The EIP to check this for (might differ from env->eip).
1404 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1405 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1406 *
1407 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1408 */
1409bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
1410{
1411 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1412 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1413 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1414 uint32_t u32CR0;
1415
1416#ifdef IEM_VERIFICATION_MODE
1417 return false;
1418#endif
1419
1420 /* Update counter. */
1421 env->pVM->rem.s.cCanExecuteRaw++;
1422
1423 /* Never when single stepping+logging guest code. */
1424 if (env->state & CPU_EMULATE_SINGLE_STEP)
1425 return false;
1426
1427 if (HWACCMIsEnabled(env->pVM))
1428 {
1429 CPUMCTX Ctx;
1430
1431 env->state |= CPU_RAW_HWACC;
1432
1433 /*
1434 * Create partial context for HWACCMR3CanExecuteGuest
1435 */
1436 Ctx.cr0 = env->cr[0];
1437 Ctx.cr3 = env->cr[3];
1438 Ctx.cr4 = env->cr[4];
1439
1440 Ctx.tr = env->tr.selector;
1441 Ctx.trHid.u64Base = env->tr.base;
1442 Ctx.trHid.u32Limit = env->tr.limit;
1443 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1444
1445 Ctx.ldtr = env->ldt.selector;
1446 Ctx.ldtrHid.u64Base = env->ldt.base;
1447 Ctx.ldtrHid.u32Limit = env->ldt.limit;
1448 Ctx.ldtrHid.Attr.u = (env->ldt.flags >> 8) & 0xF0FF;
1449
1450 Ctx.idtr.cbIdt = env->idt.limit;
1451 Ctx.idtr.pIdt = env->idt.base;
1452
1453 Ctx.gdtr.cbGdt = env->gdt.limit;
1454 Ctx.gdtr.pGdt = env->gdt.base;
1455
1456 Ctx.rsp = env->regs[R_ESP];
1457 Ctx.rip = env->eip;
1458
1459 Ctx.eflags.u32 = env->eflags;
1460
1461 Ctx.cs = env->segs[R_CS].selector;
1462 Ctx.csHid.u64Base = env->segs[R_CS].base;
1463 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1464 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1465
1466 Ctx.ds = env->segs[R_DS].selector;
1467 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1468 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1469 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1470
1471 Ctx.es = env->segs[R_ES].selector;
1472 Ctx.esHid.u64Base = env->segs[R_ES].base;
1473 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1474 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1475
1476 Ctx.fs = env->segs[R_FS].selector;
1477 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1478 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1479 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1480
1481 Ctx.gs = env->segs[R_GS].selector;
1482 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1483 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1484 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1485
1486 Ctx.ss = env->segs[R_SS].selector;
1487 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1488 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1489 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1490
1491 Ctx.msrEFER = env->efer;
1492
1493 /* Hardware accelerated raw-mode:
1494 *
1495 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1496 */
1497 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1498 {
1499 *piException = EXCP_EXECUTE_HWACC;
1500 return true;
1501 }
1502 return false;
1503 }
1504
1505 /*
1506 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1507 * or 32 bits protected mode ring 0 code
1508 *
1509 * The tests are ordered by the likelihood of being true during normal execution.
1510 */
1511 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1512 {
1513 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1514 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1515 return false;
1516 }
1517
1518#ifndef VBOX_RAW_V86
1519 if (fFlags & VM_MASK) {
1520 STAM_COUNTER_INC(&gStatRefuseVM86);
1521 Log2(("raw mode refused: VM_MASK\n"));
1522 return false;
1523 }
1524#endif
1525
1526 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1527 {
1528#ifndef DEBUG_bird
1529 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1530#endif
1531 return false;
1532 }
1533
1534 if (env->singlestep_enabled)
1535 {
1536 //Log2(("raw mode refused: Single step\n"));
1537 return false;
1538 }
1539
1540 if (!QTAILQ_EMPTY(&env->breakpoints))
1541 {
1542 //Log2(("raw mode refused: Breakpoints\n"));
1543 return false;
1544 }
1545
1546 if (!QTAILQ_EMPTY(&env->watchpoints))
1547 {
1548 //Log2(("raw mode refused: Watchpoints\n"));
1549 return false;
1550 }
1551
1552 u32CR0 = env->cr[0];
1553 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1554 {
1555 STAM_COUNTER_INC(&gStatRefusePaging);
1556 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1557 return false;
1558 }
1559
1560 if (env->cr[4] & CR4_PAE_MASK)
1561 {
1562 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1563 {
1564 STAM_COUNTER_INC(&gStatRefusePAE);
1565 return false;
1566 }
1567 }
1568
1569 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1570 {
1571 if (!EMIsRawRing3Enabled(env->pVM))
1572 return false;
1573
1574 if (!(env->eflags & IF_MASK))
1575 {
1576 STAM_COUNTER_INC(&gStatRefuseIF0);
1577 Log2(("raw mode refused: IF (RawR3)\n"));
1578 return false;
1579 }
1580
1581 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1582 {
1583 STAM_COUNTER_INC(&gStatRefuseWP0);
1584 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1585 return false;
1586 }
1587 }
1588 else
1589 {
1590 if (!EMIsRawRing0Enabled(env->pVM))
1591 return false;
1592
1593 // Let's start with pure 32 bits ring 0 code first
1594 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1595 {
1596 STAM_COUNTER_INC(&gStatRefuseCode16);
1597 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1598 return false;
1599 }
1600
1601 // Only R0
1602 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1603 {
1604 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1605 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1606 return false;
1607 }
1608
1609 if (!(u32CR0 & CR0_WP_MASK))
1610 {
1611 STAM_COUNTER_INC(&gStatRefuseWP0);
1612 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1613 return false;
1614 }
1615
1616 if (PATMIsPatchGCAddr(env->pVM, eip))
1617 {
1618 Log2(("raw r0 mode forced: patch code\n"));
1619 *piException = EXCP_EXECUTE_RAW;
1620 return true;
1621 }
1622
1623#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1624 if (!(env->eflags & IF_MASK))
1625 {
1626 STAM_COUNTER_INC(&gStatRefuseIF0);
1627 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1628 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1629 return false;
1630 }
1631#endif
1632
1633 env->state |= CPU_RAW_RING0;
1634 }
1635
1636 /*
1637 * Don't reschedule the first time we're called, because there might be
1638 * special reasons why we're here that is not covered by the above checks.
1639 */
1640 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1641 {
1642 Log2(("raw mode refused: first scheduling\n"));
1643 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1644 return false;
1645 }
1646
1647 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1648 *piException = EXCP_EXECUTE_RAW;
1649 return true;
1650}
1651
1652
1653/**
1654 * Fetches a code byte.
1655 *
1656 * @returns Success indicator (bool) for ease of use.
1657 * @param env The CPU environment structure.
1658 * @param GCPtrInstr Where to fetch code.
1659 * @param pu8Byte Where to store the byte on success
1660 */
1661bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1662{
1663 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1664 if (RT_SUCCESS(rc))
1665 return true;
1666 return false;
1667}
1668
1669
1670/**
1671 * Flush (or invalidate if you like) page table/dir entry.
1672 *
1673 * (invlpg instruction; tlb_flush_page)
1674 *
1675 * @param env Pointer to cpu environment.
1676 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1677 */
1678void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1679{
1680 PVM pVM = env->pVM;
1681 PCPUMCTX pCtx;
1682 int rc;
1683
1684 Assert(EMRemIsLockOwner(env->pVM));
1685
1686 /*
1687 * When we're replaying invlpg instructions or restoring a saved
1688 * state we disable this path.
1689 */
1690 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1691 return;
1692 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1693 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1694
1695 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1696
1697 /*
1698 * Update the control registers before calling PGMFlushPage.
1699 */
1700 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1701 Assert(pCtx);
1702 pCtx->cr0 = env->cr[0];
1703 pCtx->cr3 = env->cr[3];
1704 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1705 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1706 pCtx->cr4 = env->cr[4];
1707
1708 /*
1709 * Let PGM do the rest.
1710 */
1711 Assert(env->pVCpu);
1712 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1713 if (RT_FAILURE(rc))
1714 {
1715 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1716 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1717 }
1718 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1719}
1720
1721
#ifndef REM_PHYS_ADDR_IN_TLB
/** Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * Translates a guest physical address to an R3 pointer for the recompiler's
 * TLB, encoding access information into the low bits of the returned pointer:
 *   - (void *)1                    : translation failed (catch-all / unassigned),
 *   - pointer with bit 1 set (|2)  : writes must be trapped (catch-write),
 *   - plain pointer                : direct access ok.
 *
 * NOTE(review): the fWritable parameter is currently ignored — the call below
 * hard-codes true; confirm whether read-only lookups should pass it through.
 */
void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;                       /* failure sentinel */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);     /* tag: trap writes */
    return pv;
}
#endif /* REM_PHYS_ADDR_IN_TLB */
1744
1745
1746/**
1747 * Called from tlb_protect_code in order to write monitor a code page.
1748 *
1749 * @param env Pointer to the CPU environment.
1750 * @param GCPtr Code page to monitor
1751 */
1752void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1753{
1754#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1755 Assert(env->pVM->rem.s.fInREM);
1756 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1757 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1758 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1759 && !(env->eflags & VM_MASK) /* no V86 mode */
1760 && !HWACCMIsEnabled(env->pVM))
1761 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1762#endif
1763}
1764
1765
1766/**
1767 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1768 *
1769 * @param env Pointer to the CPU environment.
1770 * @param GCPtr Code page to monitor
1771 */
1772void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1773{
1774 Assert(env->pVM->rem.s.fInREM);
1775#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1776 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1777 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1778 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1779 && !(env->eflags & VM_MASK) /* no V86 mode */
1780 && !HWACCMIsEnabled(env->pVM))
1781 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1782#endif
1783}
1784
1785
1786/**
1787 * Called when the CPU is initialized, any of the CRx registers are changed or
1788 * when the A20 line is modified.
1789 *
1790 * @param env Pointer to the CPU environment.
1791 * @param fGlobal Set if the flush is global.
1792 */
1793void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1794{
1795 PVM pVM = env->pVM;
1796 PCPUMCTX pCtx;
1797 Assert(EMRemIsLockOwner(pVM));
1798
1799 /*
1800 * When we're replaying invlpg instructions or restoring a saved
1801 * state we disable this path.
1802 */
1803 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1804 return;
1805 Assert(pVM->rem.s.fInREM);
1806
1807 /*
1808 * The caller doesn't check cr4, so we have to do that for ourselves.
1809 */
1810 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1811 fGlobal = true;
1812 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1813
1814 /*
1815 * Update the control registers before calling PGMR3FlushTLB.
1816 */
1817 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1818 Assert(pCtx);
1819 pCtx->cr0 = env->cr[0];
1820 pCtx->cr3 = env->cr[3];
1821 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1822 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1823 pCtx->cr4 = env->cr[4];
1824
1825 /*
1826 * Let PGM do the rest.
1827 */
1828 Assert(env->pVCpu);
1829 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1830}
1831
1832
1833/**
1834 * Called when any of the cr0, cr4 or efer registers is updated.
1835 *
1836 * @param env Pointer to the CPU environment.
1837 */
1838void remR3ChangeCpuMode(CPUX86State *env)
1839{
1840 PVM pVM = env->pVM;
1841 uint64_t efer;
1842 PCPUMCTX pCtx;
1843 int rc;
1844
1845 /*
1846 * When we're replaying loads or restoring a saved
1847 * state this path is disabled.
1848 */
1849 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1850 return;
1851 Assert(pVM->rem.s.fInREM);
1852
1853 /*
1854 * Update the control registers before calling PGMChangeMode()
1855 * as it may need to map whatever cr3 is pointing to.
1856 */
1857 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1858 Assert(pCtx);
1859 pCtx->cr0 = env->cr[0];
1860 pCtx->cr3 = env->cr[3];
1861 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1862 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1863 pCtx->cr4 = env->cr[4];
1864#ifdef TARGET_X86_64
1865 efer = env->efer;
1866 pCtx->msrEFER = efer;
1867#else
1868 efer = 0;
1869#endif
1870 Assert(env->pVCpu);
1871 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1872 if (rc != VINF_SUCCESS)
1873 {
1874 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1875 {
1876 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1877 remR3RaiseRC(env->pVM, rc);
1878 }
1879 else
1880 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1881 }
1882}
1883
1884
1885/**
1886 * Called from compiled code to run dma.
1887 *
1888 * @param env Pointer to the CPU environment.
1889 */
1890void remR3DmaRun(CPUX86State *env)
1891{
1892 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1893 PDMR3DmaRun(env->pVM);
1894 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1895}
1896
1897
1898/**
1899 * Called from compiled code to schedule pending timers in VMM
1900 *
1901 * @param env Pointer to the CPU environment.
1902 */
1903void remR3TimersRun(CPUX86State *env)
1904{
1905 LogFlow(("remR3TimersRun:\n"));
1906 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1907 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1908 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1909 TMR3TimerQueuesDo(env->pVM);
1910 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1911 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1912}
1913
1914
1915/**
1916 * Record trap occurrence
1917 *
1918 * @returns VBox status code
1919 * @param env Pointer to the CPU environment.
1920 * @param uTrap Trap nr
1921 * @param uErrorCode Error code
1922 * @param pvNextEIP Next EIP
1923 */
1924int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1925{
1926 PVM pVM = env->pVM;
1927#ifdef VBOX_WITH_STATISTICS
1928 static STAMCOUNTER s_aStatTrap[255];
1929 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1930#endif
1931
1932#ifdef VBOX_WITH_STATISTICS
1933 if (uTrap < 255)
1934 {
1935 if (!s_aRegisters[uTrap])
1936 {
1937 char szStatName[64];
1938 s_aRegisters[uTrap] = true;
1939 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1940 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1941 }
1942 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1943 }
1944#endif
1945 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1946 if( uTrap < 0x20
1947 && (env->cr[0] & X86_CR0_PE)
1948 && !(env->eflags & X86_EFL_VM))
1949 {
1950#ifdef DEBUG
1951 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1952#endif
1953 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1954 {
1955 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1956 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1957 return VERR_REM_TOO_MANY_TRAPS;
1958 }
1959 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1960 pVM->rem.s.cPendingExceptions = 1;
1961 pVM->rem.s.uPendingException = uTrap;
1962 pVM->rem.s.uPendingExcptEIP = env->eip;
1963 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1964 }
1965 else
1966 {
1967 pVM->rem.s.cPendingExceptions = 0;
1968 pVM->rem.s.uPendingException = uTrap;
1969 pVM->rem.s.uPendingExcptEIP = env->eip;
1970 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1971 }
1972 return VINF_SUCCESS;
1973}
1974
1975
1976/*
1977 * Clear current active trap
1978 *
1979 * @param pVM VM Handle.
1980 */
1981void remR3TrapClear(PVM pVM)
1982{
1983 pVM->rem.s.cPendingExceptions = 0;
1984 pVM->rem.s.uPendingException = 0;
1985 pVM->rem.s.uPendingExcptEIP = 0;
1986 pVM->rem.s.uPendingExcptCR2 = 0;
1987}
1988
1989
1990/*
1991 * Record previous call instruction addresses
1992 *
1993 * @param env Pointer to the CPU environment.
1994 */
1995void remR3RecordCall(CPUX86State *env)
1996{
1997 CSAMR3RecordCallAddress(env->pVM, env->eip);
1998}
1999
2000
2001/**
2002 * Syncs the internal REM state with the VM.
2003 *
2004 * This must be called before REMR3Run() is invoked whenever when the REM
2005 * state is not up to date. Calling it several times in a row is not
2006 * permitted.
2007 *
2008 * @returns VBox status code.
2009 *
2010 * @param pVM VM Handle.
2011 * @param pVCpu VMCPU Handle.
2012 *
2013 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2014 * no do this since the majority of the callers don't want any unnecessary of events
2015 * pending that would immediately interrupt execution.
2016 */
2017REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2018{
2019 register const CPUMCTX *pCtx;
2020 register unsigned fFlags;
2021 bool fHiddenSelRegsValid;
2022 unsigned i;
2023 TRPMEVENT enmType;
2024 uint8_t u8TrapNo;
2025 uint32_t uCpl;
2026 int rc;
2027
2028 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2029 Log2(("REMR3State:\n"));
2030
2031 pVM->rem.s.Env.pVCpu = pVCpu;
2032 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2033 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVCpu); /// @todo move this down and use fFlags.
2034
2035 Assert(!pVM->rem.s.fInREM);
2036 pVM->rem.s.fInStateSync = true;
2037
2038 /*
2039 * If we have to flush TBs, do that immediately.
2040 */
2041 if (pVM->rem.s.fFlushTBs)
2042 {
2043 STAM_COUNTER_INC(&gStatFlushTBs);
2044 tb_flush(&pVM->rem.s.Env);
2045 pVM->rem.s.fFlushTBs = false;
2046 }
2047
2048 /*
2049 * Copy the registers which require no special handling.
2050 */
2051#ifdef TARGET_X86_64
2052 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2053 Assert(R_EAX == 0);
2054 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2055 Assert(R_ECX == 1);
2056 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2057 Assert(R_EDX == 2);
2058 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2059 Assert(R_EBX == 3);
2060 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2061 Assert(R_ESP == 4);
2062 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2063 Assert(R_EBP == 5);
2064 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2065 Assert(R_ESI == 6);
2066 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2067 Assert(R_EDI == 7);
2068 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2069 pVM->rem.s.Env.regs[8] = pCtx->r8;
2070 pVM->rem.s.Env.regs[9] = pCtx->r9;
2071 pVM->rem.s.Env.regs[10] = pCtx->r10;
2072 pVM->rem.s.Env.regs[11] = pCtx->r11;
2073 pVM->rem.s.Env.regs[12] = pCtx->r12;
2074 pVM->rem.s.Env.regs[13] = pCtx->r13;
2075 pVM->rem.s.Env.regs[14] = pCtx->r14;
2076 pVM->rem.s.Env.regs[15] = pCtx->r15;
2077
2078 pVM->rem.s.Env.eip = pCtx->rip;
2079
2080 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2081#else
2082 Assert(R_EAX == 0);
2083 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2084 Assert(R_ECX == 1);
2085 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2086 Assert(R_EDX == 2);
2087 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2088 Assert(R_EBX == 3);
2089 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2090 Assert(R_ESP == 4);
2091 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2092 Assert(R_EBP == 5);
2093 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2094 Assert(R_ESI == 6);
2095 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2096 Assert(R_EDI == 7);
2097 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2098 pVM->rem.s.Env.eip = pCtx->eip;
2099
2100 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2101#endif
2102
2103 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2104
2105 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2106 for (i=0;i<8;i++)
2107 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2108
2109#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2110 /*
2111 * Clear the halted hidden flag (the interrupt waking up the CPU can
2112 * have been dispatched in raw mode).
2113 */
2114 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2115#endif
2116
2117 /*
2118 * Replay invlpg? Only if we're not flushing the TLB.
2119 */
2120 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2121 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2122 if (pVM->rem.s.cInvalidatedPages)
2123 {
2124 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
2125 {
2126 RTUINT i;
2127
2128 pVM->rem.s.fIgnoreCR3Load = true;
2129 pVM->rem.s.fIgnoreInvlPg = true;
2130 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2131 {
2132 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2133 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2134 }
2135 pVM->rem.s.fIgnoreInvlPg = false;
2136 pVM->rem.s.fIgnoreCR3Load = false;
2137 }
2138 pVM->rem.s.cInvalidatedPages = 0;
2139 }
2140
2141 /* Replay notification changes. */
2142 REMR3ReplayHandlerNotifications(pVM);
2143
2144 /* Update MSRs; before CRx registers! */
2145 pVM->rem.s.Env.efer = pCtx->msrEFER;
2146 pVM->rem.s.Env.star = pCtx->msrSTAR;
2147 pVM->rem.s.Env.pat = pCtx->msrPAT;
2148#ifdef TARGET_X86_64
2149 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2150 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2151 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2152 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2153
2154 /* Update the internal long mode activate flag according to the new EFER value. */
2155 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2156 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2157 else
2158 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2159#endif
2160
2161 /*
2162 * Sync the A20 gate.
2163 */
2164 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2165 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2166 {
2167 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2168 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2169 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2170 }
2171
2172 /*
2173 * Registers which are rarely changed and require special handling / order when changed.
2174 */
2175 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2176 | CPUM_CHANGED_CR4
2177 | CPUM_CHANGED_CR0
2178 | CPUM_CHANGED_CR3
2179 | CPUM_CHANGED_GDTR
2180 | CPUM_CHANGED_IDTR
2181 | CPUM_CHANGED_SYSENTER_MSR
2182 | CPUM_CHANGED_LDTR
2183 | CPUM_CHANGED_CPUID
2184 | CPUM_CHANGED_FPU_REM
2185 )
2186 )
2187 {
2188 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2189 {
2190 pVM->rem.s.fIgnoreCR3Load = true;
2191 tlb_flush(&pVM->rem.s.Env, true);
2192 pVM->rem.s.fIgnoreCR3Load = false;
2193 }
2194
2195 /* CR4 before CR0! */
2196 if (fFlags & CPUM_CHANGED_CR4)
2197 {
2198 pVM->rem.s.fIgnoreCR3Load = true;
2199 pVM->rem.s.fIgnoreCpuMode = true;
2200 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2201 pVM->rem.s.fIgnoreCpuMode = false;
2202 pVM->rem.s.fIgnoreCR3Load = false;
2203 }
2204
2205 if (fFlags & CPUM_CHANGED_CR0)
2206 {
2207 pVM->rem.s.fIgnoreCR3Load = true;
2208 pVM->rem.s.fIgnoreCpuMode = true;
2209 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2210 pVM->rem.s.fIgnoreCpuMode = false;
2211 pVM->rem.s.fIgnoreCR3Load = false;
2212 }
2213
2214 if (fFlags & CPUM_CHANGED_CR3)
2215 {
2216 pVM->rem.s.fIgnoreCR3Load = true;
2217 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2218 pVM->rem.s.fIgnoreCR3Load = false;
2219 }
2220
2221 if (fFlags & CPUM_CHANGED_GDTR)
2222 {
2223 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2224 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2225 }
2226
2227 if (fFlags & CPUM_CHANGED_IDTR)
2228 {
2229 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2230 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2231 }
2232
2233 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2234 {
2235 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2236 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2237 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2238 }
2239
2240 if (fFlags & CPUM_CHANGED_LDTR)
2241 {
2242 if (fHiddenSelRegsValid)
2243 {
2244 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
2245 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
2246 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
2247 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
2248 }
2249 else
2250 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2251 }
2252
2253 if (fFlags & CPUM_CHANGED_CPUID)
2254 {
2255 uint32_t u32Dummy;
2256
2257 /*
2258 * Get the CPUID features.
2259 */
2260 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2261 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2262 }
2263
2264 /* Sync FPU state after CR4, CPUID and EFER (!). */
2265 if (fFlags & CPUM_CHANGED_FPU_REM)
2266 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2267 }
2268
2269 /*
2270 * Sync TR unconditionally to make life simpler.
2271 */
2272 pVM->rem.s.Env.tr.selector = pCtx->tr;
2273 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2274 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2275 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2276 /* Note! do_interrupt will fault if the busy flag is still set... */
2277 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2278
2279 /*
2280 * Update selector registers.
2281 * This must be done *after* we've synced gdt, ldt and crX registers
2282 * since we're reading the GDT/LDT om sync_seg. This will happen with
2283 * saved state which takes a quick dip into rawmode for instance.
2284 */
2285 /*
2286 * Stack; Note first check this one as the CPL might have changed. The
2287 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2288 */
2289
2290 if (fHiddenSelRegsValid)
2291 {
2292 /* The hidden selector registers are valid in the CPU context. */
2293 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2294
2295 /* Set current CPL */
2296 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2297
2298 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2299 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2300 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2301 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2302 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2303 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2304 }
2305 else
2306 {
2307 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2308 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2309 {
2310 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2311
2312 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2313 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2314#ifdef VBOX_WITH_STATISTICS
2315 if (pVM->rem.s.Env.segs[R_SS].newselector)
2316 {
2317 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2318 }
2319#endif
2320 }
2321 else
2322 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2323
2324 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2325 {
2326 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2327 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2328#ifdef VBOX_WITH_STATISTICS
2329 if (pVM->rem.s.Env.segs[R_ES].newselector)
2330 {
2331 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2332 }
2333#endif
2334 }
2335 else
2336 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2337
2338 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2339 {
2340 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2341 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2342#ifdef VBOX_WITH_STATISTICS
2343 if (pVM->rem.s.Env.segs[R_CS].newselector)
2344 {
2345 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2346 }
2347#endif
2348 }
2349 else
2350 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2351
2352 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2353 {
2354 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2355 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2356#ifdef VBOX_WITH_STATISTICS
2357 if (pVM->rem.s.Env.segs[R_DS].newselector)
2358 {
2359 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2360 }
2361#endif
2362 }
2363 else
2364 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2365
2366 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2367 * be the same but not the base/limit. */
2368 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2369 {
2370 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2371 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2372#ifdef VBOX_WITH_STATISTICS
2373 if (pVM->rem.s.Env.segs[R_FS].newselector)
2374 {
2375 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2376 }
2377#endif
2378 }
2379 else
2380 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2381
2382 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2383 {
2384 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2385 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2386#ifdef VBOX_WITH_STATISTICS
2387 if (pVM->rem.s.Env.segs[R_GS].newselector)
2388 {
2389 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2390 }
2391#endif
2392 }
2393 else
2394 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2395 }
2396
2397 /*
2398 * Check for traps.
2399 */
2400 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2401 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2402 if (RT_SUCCESS(rc))
2403 {
2404#ifdef DEBUG
2405 if (u8TrapNo == 0x80)
2406 {
2407 remR3DumpLnxSyscall(pVCpu);
2408 remR3DumpOBsdSyscall(pVCpu);
2409 }
2410#endif
2411
2412 pVM->rem.s.Env.exception_index = u8TrapNo;
2413 if (enmType != TRPM_SOFTWARE_INT)
2414 {
2415 pVM->rem.s.Env.exception_is_int = 0;
2416 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2417 }
2418 else
2419 {
2420 /*
2421 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2422 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2423 * for int03 and into.
2424 */
2425 pVM->rem.s.Env.exception_is_int = 1;
2426 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2427 /* int 3 may be generated by one-byte 0xcc */
2428 if (u8TrapNo == 3)
2429 {
2430 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2431 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2432 }
2433 /* int 4 may be generated by one-byte 0xce */
2434 else if (u8TrapNo == 4)
2435 {
2436 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2437 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2438 }
2439 }
2440
2441 /* get error code and cr2 if needed. */
2442 if (enmType == TRPM_TRAP)
2443 {
2444 switch (u8TrapNo)
2445 {
2446 case 0x0e:
2447 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2448 /* fallthru */
2449 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2450 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2451 break;
2452
2453 case 0x11: case 0x08:
2454 default:
2455 pVM->rem.s.Env.error_code = 0;
2456 break;
2457 }
2458 }
2459 else
2460 pVM->rem.s.Env.error_code = 0;
2461
2462 /*
2463 * We can now reset the active trap since the recompiler is gonna have a go at it.
2464 */
2465 rc = TRPMResetTrap(pVCpu);
2466 AssertRC(rc);
2467 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2468 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2469 }
2470
2471 /*
2472 * Clear old interrupt request flags; Check for pending hardware interrupts.
2473 * (See @remark for why we don't check for other FFs.)
2474 */
2475 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2476 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2477 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2478 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2479
2480 /*
2481 * We're now in REM mode.
2482 */
2483 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2484 pVM->rem.s.fInREM = true;
2485 pVM->rem.s.fInStateSync = false;
2486 pVM->rem.s.cCanExecuteRaw = 0;
2487 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2488 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2489 return VINF_SUCCESS;
2490}
2491
2492
2493/**
2494 * Syncs back changes in the REM state to the the VM state.
2495 *
2496 * This must be called after invoking REMR3Run().
2497 * Calling it several times in a row is not permitted.
2498 *
2499 * @returns VBox status code.
2500 *
2501 * @param pVM VM Handle.
2502 * @param pVCpu VMCPU Handle.
2503 */
2504REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2505{
2506 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2507 Assert(pCtx);
2508 unsigned i;
2509
2510 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2511 Log2(("REMR3StateBack:\n"));
2512 Assert(pVM->rem.s.fInREM);
2513
2514 /*
2515 * Copy back the registers.
2516 * This is done in the order they are declared in the CPUMCTX structure.
2517 */
2518
2519 /** @todo FOP */
2520 /** @todo FPUIP */
2521 /** @todo CS */
2522 /** @todo FPUDP */
2523 /** @todo DS */
2524
2525 /** @todo check if FPU/XMM was actually used in the recompiler */
2526 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2527//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2528
2529#ifdef TARGET_X86_64
2530 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2531 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2532 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2533 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2534 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2535 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2536 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2537 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2538 pCtx->r8 = pVM->rem.s.Env.regs[8];
2539 pCtx->r9 = pVM->rem.s.Env.regs[9];
2540 pCtx->r10 = pVM->rem.s.Env.regs[10];
2541 pCtx->r11 = pVM->rem.s.Env.regs[11];
2542 pCtx->r12 = pVM->rem.s.Env.regs[12];
2543 pCtx->r13 = pVM->rem.s.Env.regs[13];
2544 pCtx->r14 = pVM->rem.s.Env.regs[14];
2545 pCtx->r15 = pVM->rem.s.Env.regs[15];
2546
2547 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2548
2549#else
2550 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2551 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2552 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2553 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2554 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2555 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2556 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2557
2558 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2559#endif
2560
2561 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2562
2563#ifdef VBOX_WITH_STATISTICS
2564 if (pVM->rem.s.Env.segs[R_SS].newselector)
2565 {
2566 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2567 }
2568 if (pVM->rem.s.Env.segs[R_GS].newselector)
2569 {
2570 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2571 }
2572 if (pVM->rem.s.Env.segs[R_FS].newselector)
2573 {
2574 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2575 }
2576 if (pVM->rem.s.Env.segs[R_ES].newselector)
2577 {
2578 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2579 }
2580 if (pVM->rem.s.Env.segs[R_DS].newselector)
2581 {
2582 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2583 }
2584 if (pVM->rem.s.Env.segs[R_CS].newselector)
2585 {
2586 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2587 }
2588#endif
2589 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2590 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2591 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2592 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2593 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2594
2595#ifdef TARGET_X86_64
2596 pCtx->rip = pVM->rem.s.Env.eip;
2597 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2598#else
2599 pCtx->eip = pVM->rem.s.Env.eip;
2600 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2601#endif
2602
2603 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2604 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2605 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2606 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2607 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2608 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2609
2610 for (i = 0; i < 8; i++)
2611 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2612
2613 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2614 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2615 {
2616 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2617 STAM_COUNTER_INC(&gStatREMGDTChange);
2618 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2619 }
2620
2621 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2622 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2623 {
2624 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2625 STAM_COUNTER_INC(&gStatREMIDTChange);
2626 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2627 }
2628
2629 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2630 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2631 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2632 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2633 {
2634 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2635 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2636 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2637 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2638 STAM_COUNTER_INC(&gStatREMLDTRChange);
2639 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2640 }
2641
2642 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2643 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2644 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2645 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2646 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2647 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2648 : 0) )
2649 {
2650 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2651 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2652 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2653 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2654 pCtx->tr = pVM->rem.s.Env.tr.selector;
2655 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2656 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2657 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2658 if (pCtx->trHid.Attr.u)
2659 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2660 STAM_COUNTER_INC(&gStatREMTRChange);
2661 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2662 }
2663
2664 /** @todo These values could still be out of sync! */
2665 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2666 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2667 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2668 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2669
2670 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2671 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2672 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2673
2674 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2675 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2676 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2677
2678 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2679 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2680 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2681
2682 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2683 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2684 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2685
2686 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2687 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2688 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2689
2690 /* Sysenter MSR */
2691 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2692 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2693 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2694
2695 /* System MSRs. */
2696 pCtx->msrEFER = pVM->rem.s.Env.efer;
2697 pCtx->msrSTAR = pVM->rem.s.Env.star;
2698 pCtx->msrPAT = pVM->rem.s.Env.pat;
2699#ifdef TARGET_X86_64
2700 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2701 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2702 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2703 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2704#endif
2705
2706 remR3TrapClear(pVM);
2707
2708 /*
2709 * Check for traps.
2710 */
2711 if ( pVM->rem.s.Env.exception_index >= 0
2712 && pVM->rem.s.Env.exception_index < 256)
2713 {
2714 int rc;
2715
2716 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2717 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2718 AssertRC(rc);
2719 switch (pVM->rem.s.Env.exception_index)
2720 {
2721 case 0x0e:
2722 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2723 /* fallthru */
2724 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2725 case 0x11: case 0x08: /* 0 */
2726 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2727 break;
2728 }
2729
2730 }
2731
2732 /*
2733 * We're not longer in REM mode.
2734 */
2735 CPUMR3RemLeave(pVCpu,
2736 HWACCMIsEnabled(pVM)
2737 || ( pVM->rem.s.Env.segs[R_SS].newselector
2738 | pVM->rem.s.Env.segs[R_GS].newselector
2739 | pVM->rem.s.Env.segs[R_FS].newselector
2740 | pVM->rem.s.Env.segs[R_ES].newselector
2741 | pVM->rem.s.Env.segs[R_DS].newselector
2742 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2743 );
2744 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2745 pVM->rem.s.fInREM = false;
2746 pVM->rem.s.pCtx = NULL;
2747 pVM->rem.s.Env.pVCpu = NULL;
2748 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2749 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2750 return VINF_SUCCESS;
2751}
2752
2753
2754/**
2755 * This is called by the disassembler when it wants to update the cpu state
2756 * before for instance doing a register dump.
2757 */
2758static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2759{
2760 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2761 unsigned i;
2762
2763 Assert(pVM->rem.s.fInREM);
2764
2765 /*
2766 * Copy back the registers.
2767 * This is done in the order they are declared in the CPUMCTX structure.
2768 */
2769
2770 /** @todo FOP */
2771 /** @todo FPUIP */
2772 /** @todo CS */
2773 /** @todo FPUDP */
2774 /** @todo DS */
2775 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2776 pCtx->fpu.MXCSR = 0;
2777 pCtx->fpu.MXCSR_MASK = 0;
2778
2779 /** @todo check if FPU/XMM was actually used in the recompiler */
2780 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2781//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2782
2783#ifdef TARGET_X86_64
2784 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2785 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2786 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2787 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2788 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2789 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2790 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2791 pCtx->r8 = pVM->rem.s.Env.regs[8];
2792 pCtx->r9 = pVM->rem.s.Env.regs[9];
2793 pCtx->r10 = pVM->rem.s.Env.regs[10];
2794 pCtx->r11 = pVM->rem.s.Env.regs[11];
2795 pCtx->r12 = pVM->rem.s.Env.regs[12];
2796 pCtx->r13 = pVM->rem.s.Env.regs[13];
2797 pCtx->r14 = pVM->rem.s.Env.regs[14];
2798 pCtx->r15 = pVM->rem.s.Env.regs[15];
2799
2800 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2801#else
2802 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2803 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2804 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2805 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2806 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2807 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2808 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2809
2810 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2811#endif
2812
2813 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2814
2815 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2816 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2817 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2818 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2819 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2820
2821#ifdef TARGET_X86_64
2822 pCtx->rip = pVM->rem.s.Env.eip;
2823 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2824#else
2825 pCtx->eip = pVM->rem.s.Env.eip;
2826 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2827#endif
2828
2829 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2830 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2831 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2832 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2833 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2834 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2835
2836 for (i = 0; i < 8; i++)
2837 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2838
2839 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2840 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2841 {
2842 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2843 STAM_COUNTER_INC(&gStatREMGDTChange);
2844 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2845 }
2846
2847 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2848 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2849 {
2850 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2851 STAM_COUNTER_INC(&gStatREMIDTChange);
2852 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2853 }
2854
2855 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2856 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2857 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2858 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2859 {
2860 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2861 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2862 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2863 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2864 STAM_COUNTER_INC(&gStatREMLDTRChange);
2865 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2866 }
2867
2868 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2869 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2870 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2871 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2872 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2873 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2874 : 0) )
2875 {
2876 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2877 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2878 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2879 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2880 pCtx->tr = pVM->rem.s.Env.tr.selector;
2881 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2882 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2883 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2884 if (pCtx->trHid.Attr.u)
2885 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2886 STAM_COUNTER_INC(&gStatREMTRChange);
2887 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2888 }
2889
2890 /** @todo These values could still be out of sync! */
2891 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2892 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2893 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2894 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2895
2896 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2897 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2898 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2899
2900 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2901 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2902 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2903
2904 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2905 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2906 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2907
2908 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2909 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2910 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2911
2912 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2913 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2914 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2915
2916 /* Sysenter MSR */
2917 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2918 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2919 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2920
2921 /* System MSRs. */
2922 pCtx->msrEFER = pVM->rem.s.Env.efer;
2923 pCtx->msrSTAR = pVM->rem.s.Env.star;
2924 pCtx->msrPAT = pVM->rem.s.Env.pat;
2925#ifdef TARGET_X86_64
2926 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2927 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2928 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2929 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2930#endif
2931
2932}
2933
2934
2935/**
2936 * Update the VMM state information if we're currently in REM.
2937 *
2938 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2939 * we're currently executing in REM and the VMM state is invalid. This method will of
2940 * course check that we're executing in REM before syncing any data over to the VMM.
2941 *
2942 * @param pVM The VM handle.
2943 * @param pVCpu The VMCPU handle.
2944 */
2945REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2946{
2947 if (pVM->rem.s.fInREM)
2948 remR3StateUpdate(pVM, pVCpu);
2949}
2950
2951
2952#undef LOG_GROUP
2953#define LOG_GROUP LOG_GROUP_REM
2954
2955
/**
 * Replays the handler notification changes.
 *
 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
 * Notifications are queued lock-free in pVM->rem.s.aHandlerNotifications (a
 * fixed-size array used as linked lists via idxNext indices); this routine
 * detaches the whole pending list atomically, processes it in FIFO order and
 * returns each record to the shared free list one by one.
 *
 * @param pVM VM handle.
 */
REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
{
    /*
     * Replay the flushes.
     */
    LogFlow(("REMR3ReplayHandlerNotifications:\n"));
    VM_ASSERT_EMT(pVM);

    /** @todo this isn't ensuring correct replay order. */
    if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
    {
        uint32_t    idxNext;
        uint32_t    idxRevHead;
        uint32_t    idxHead;
#ifdef VBOX_STRICT
        int32_t     c = 0;      /* strict-mode record counter: pushes must balance pops. */
#endif

        /* Lockless purging of pending notifications: atomically steal the whole
           list by swapping in the empty-list sentinel (UINT32_MAX). */
        idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
        if (idxHead == UINT32_MAX)
            return;
        Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));

        /*
         * Reverse the list to process it in FIFO order.
         * (Producers push at the head, so the detached list is LIFO.)
         */
        idxRevHead = UINT32_MAX;
        do
        {
            /* Save the index of the next rec. */
            idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
            Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
            /* Push the record onto the reversed list. */
            pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
            idxRevHead = idxHead;
            Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
            /* Advance. */
            idxHead = idxNext;
        } while (idxHead != UINT32_MAX);

        /*
         * Loop thru the list, reinserting the record into the free list as they are
         * processed to avoid having other EMTs running out of entries while we're flushing.
         */
        idxHead = idxRevHead;
        do
        {
            PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
            uint32_t idxCur;
            Assert(--c >= 0);

            /* Dispatch on the notification kind to the matching local worker. */
            switch (pCur->enmKind)
            {
                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
                    remR3NotifyHandlerPhysicalRegister(pVM,
                                                       pCur->u.PhysicalRegister.enmType,
                                                       pCur->u.PhysicalRegister.GCPhys,
                                                       pCur->u.PhysicalRegister.cb,
                                                       pCur->u.PhysicalRegister.fHasHCHandler);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
                    remR3NotifyHandlerPhysicalDeregister(pVM,
                                                         pCur->u.PhysicalDeregister.enmType,
                                                         pCur->u.PhysicalDeregister.GCPhys,
                                                         pCur->u.PhysicalDeregister.cb,
                                                         pCur->u.PhysicalDeregister.fHasHCHandler,
                                                         pCur->u.PhysicalDeregister.fRestoreAsRAM);
                    break;

                case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
                    remR3NotifyHandlerPhysicalModify(pVM,
                                                     pCur->u.PhysicalModify.enmType,
                                                     pCur->u.PhysicalModify.GCPhysOld,
                                                     pCur->u.PhysicalModify.GCPhysNew,
                                                     pCur->u.PhysicalModify.cb,
                                                     pCur->u.PhysicalModify.fHasHCHandler,
                                                     pCur->u.PhysicalModify.fRestoreAsRAM);
                    break;

                default:
                    AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
                    break;
            }

            /*
             * Advance idxHead.
             * (Must be read before the record is pushed back on the free list,
             * where another EMT may immediately reuse and overwrite it.)
             */
            idxCur = idxHead;
            idxHead = pCur->idxNext;
            Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));

            /*
             * Put the record back into the free list.
             * (Standard lock-free stack push: retry the CAS until the head is
             * swapped in without interference.)
             */
            do
            {
                idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
                ASMAtomicWriteU32(&pCur->idxNext, idxNext);
                ASMCompilerBarrier();
            } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
        } while (idxHead != UINT32_MAX);

#ifdef VBOX_STRICT
        if (pVM->cCpus == 1)
        {
            unsigned c;
            /* Check that all records are now on the free list. */
            for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
                 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
                c++;
            AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
        }
#endif
    }
}
3079
3080
/**
 * Notify REM about changed code page.
 *
 * @returns VBox status code. Always VINF_SUCCESS (a failed guest page table
 *          lookup is silently ignored).
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.  (Unused in the current implementation.)
 * @param   pvCodePage  Code page address
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
3119
3120
3121/**
3122 * Notification about a successful MMR3PhysRegister() call.
3123 *
3124 * @param pVM VM handle.
3125 * @param GCPhys The physical address the RAM.
3126 * @param cb Size of the memory.
3127 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3128 */
3129REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3130{
3131 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3132 VM_ASSERT_EMT(pVM);
3133
3134 /*
3135 * Validate input - we trust the caller.
3136 */
3137 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3138 Assert(cb);
3139 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3140 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
3141
3142 /*
3143 * Base ram? Update GCPhysLastRam.
3144 */
3145 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3146 {
3147 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3148 {
3149 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3150 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3151 }
3152 }
3153
3154 /*
3155 * Register the ram.
3156 */
3157 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3158
3159 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3160 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3161 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3162
3163 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3164}
3165
3166
3167/**
3168 * Notification about a successful MMR3PhysRomRegister() call.
3169 *
3170 * @param pVM VM handle.
3171 * @param GCPhys The physical address of the ROM.
3172 * @param cb The size of the ROM.
3173 * @param pvCopy Pointer to the ROM copy.
3174 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3175 * This function will be called when ever the protection of the
3176 * shadow ROM changes (at reset and end of POST).
3177 */
3178REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3179{
3180 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3181 VM_ASSERT_EMT(pVM);
3182
3183 /*
3184 * Validate input - we trust the caller.
3185 */
3186 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3187 Assert(cb);
3188 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3189
3190 /*
3191 * Register the rom.
3192 */
3193 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3194
3195 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3196 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3197 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3198
3199 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3200}
3201
3202
3203/**
3204 * Notification about a successful memory deregistration or reservation.
3205 *
3206 * @param pVM VM Handle.
3207 * @param GCPhys Start physical address.
3208 * @param cb The size of the range.
3209 */
3210REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3211{
3212 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3213 VM_ASSERT_EMT(pVM);
3214
3215 /*
3216 * Validate input - we trust the caller.
3217 */
3218 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3219 Assert(cb);
3220 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3221
3222 /*
3223 * Unassigning the memory.
3224 */
3225 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3226
3227 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3228 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3229 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3230
3231 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3232}
3233
3234
3235/**
3236 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3237 *
3238 * @param pVM VM Handle.
3239 * @param enmType Handler type.
3240 * @param GCPhys Handler range address.
3241 * @param cb Size of the handler range.
3242 * @param fHasHCHandler Set if the handler has a HC callback function.
3243 *
3244 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3245 * Handler memory type to memory which has no HC handler.
3246 */
3247static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3248{
3249 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3250 enmType, GCPhys, cb, fHasHCHandler));
3251
3252 VM_ASSERT_EMT(pVM);
3253 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3254 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3255
3256
3257 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3258
3259 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3260 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3261 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3262 else if (fHasHCHandler)
3263 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3264 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3265
3266 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3267}
3268
3269/**
3270 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3271 *
3272 * @param pVM VM Handle.
3273 * @param enmType Handler type.
3274 * @param GCPhys Handler range address.
3275 * @param cb Size of the handler range.
3276 * @param fHasHCHandler Set if the handler has a HC callback function.
3277 *
3278 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3279 * Handler memory type to memory which has no HC handler.
3280 */
3281REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3282{
3283 REMR3ReplayHandlerNotifications(pVM);
3284
3285 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3286}
3287
3288/**
3289 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3290 *
3291 * @param pVM VM Handle.
3292 * @param enmType Handler type.
3293 * @param GCPhys Handler range address.
3294 * @param cb Size of the handler range.
3295 * @param fHasHCHandler Set if the handler has a HC callback function.
3296 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3297 */
3298static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3299{
3300 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3301 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3302 VM_ASSERT_EMT(pVM);
3303
3304
3305 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3306
3307 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3308 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3309 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3310 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3311 else if (fHasHCHandler)
3312 {
3313 if (!fRestoreAsRAM)
3314 {
3315 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3316 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3317 }
3318 else
3319 {
3320 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3321 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3322 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3323 }
3324 }
3325 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3326
3327 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3328}
3329
3330/**
3331 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3332 *
3333 * @param pVM VM Handle.
3334 * @param enmType Handler type.
3335 * @param GCPhys Handler range address.
3336 * @param cb Size of the handler range.
3337 * @param fHasHCHandler Set if the handler has a HC callback function.
3338 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3339 */
3340REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3341{
3342 REMR3ReplayHandlerNotifications(pVM);
3343 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3344}
3345
3346
3347/**
3348 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3349 *
3350 * @param pVM VM Handle.
3351 * @param enmType Handler type.
3352 * @param GCPhysOld Old handler range address.
3353 * @param GCPhysNew New handler range address.
3354 * @param cb Size of the handler range.
3355 * @param fHasHCHandler Set if the handler has a HC callback function.
3356 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3357 */
3358static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3359{
3360 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3361 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3362 VM_ASSERT_EMT(pVM);
3363 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3364
3365 if (fHasHCHandler)
3366 {
3367 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3368
3369 /*
3370 * Reset the old page.
3371 */
3372 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3373 if (!fRestoreAsRAM)
3374 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3375 else
3376 {
3377 /* This is not perfect, but it'll do for PD monitoring... */
3378 Assert(cb == PAGE_SIZE);
3379 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3380 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3381 }
3382
3383 /*
3384 * Update the new page.
3385 */
3386 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3387 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3388 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3389 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3390
3391 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3392 }
3393}
3394
3395/**
3396 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3397 *
3398 * @param pVM VM Handle.
3399 * @param enmType Handler type.
3400 * @param GCPhysOld Old handler range address.
3401 * @param GCPhysNew New handler range address.
3402 * @param cb Size of the handler range.
3403 * @param fHasHCHandler Set if the handler has a HC callback function.
3404 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3405 */
3406REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3407{
3408 REMR3ReplayHandlerNotifications(pVM);
3409
3410 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3411}
3412
3413/**
3414 * Checks if we're handling access to this page or not.
3415 *
3416 * @returns true if we're trapping access.
3417 * @returns false if we aren't.
3418 * @param pVM The VM handle.
3419 * @param GCPhys The physical address.
3420 *
3421 * @remark This function will only work correctly in VBOX_STRICT builds!
3422 */
3423REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3424{
3425#ifdef VBOX_STRICT
3426 unsigned long off;
3427 REMR3ReplayHandlerNotifications(pVM);
3428
3429 off = get_phys_page_offset(GCPhys);
3430 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3431 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3432 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3433#else
3434 return false;
3435#endif
3436}
3437
3438
/**
 * Deals with a rare case in get_phys_addr_code where the code
 * is being monitored.
 *
 * It could also be an MMIO page, in which case we will raise a fatal error
 * (this function does not return in that case: it dumps diagnostics and
 * calls cpu_abort()).
 *
 * @returns The physical address corresponding to addr.
 * @param   env         The cpu environment.
 * @param   addr        The virtual address.
 * @param   pTLBEntry   The TLB entry.
 * @param   ioTLBEntry  The I/O TLB entry for the page (memory type in the
 *                      low bits, physical addend in the page-aligned part).
 */
target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
                                             target_ulong addr,
                                             CPUTLBEntry *pTLBEntry,
                                             target_phys_addr_t ioTLBEntry)
{
    PVM pVM = env->pVM;

    if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
    {
        /* If code memory is being monitored, appropriate IOTLB entry will have
           handler IO type, and addend will provide real physical address, no
           matter if we store VA in TLB or not, as handlers are always passed PA */
        target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
        return ret;
    }
    /* Not a monitored code page: executing from MMIO or similar is fatal.
       Dump the handler/MMIO/phys info to the release log before aborting. */
    LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
            "*** handlers\n",
            (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
    DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** mmio\n"));
    DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
    LogRel(("*** phys\n"));
    DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
    cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
              (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
    AssertFatalFailed();
}
3477
/**
 * Read guest RAM and ROM.
 *
 * @param   SrcGCPhys       The source address (guest physical).
 * @param   pvDst           The destination address.
 * @param   cb              Number of bytes
 */
void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemRead, a);
    VBOX_CHECK_ADDR(SrcGCPhys);
    PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
    /* NOTE(review): the debug log sits inside the profiled span here, while
       the sized read/write helpers below log after STAM_PROFILE_ADV_STOP. */
#ifdef VBOX_DEBUG_PHYS
    LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
#endif
    STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
}
3495
3496
3497/**
3498 * Read guest RAM and ROM, unsigned 8-bit.
3499 *
3500 * @param SrcGCPhys The source address (guest physical).
3501 */
3502RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3503{
3504 uint8_t val;
3505 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3506 VBOX_CHECK_ADDR(SrcGCPhys);
3507 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3508 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3509#ifdef VBOX_DEBUG_PHYS
3510 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3511#endif
3512 return val;
3513}
3514
3515
3516/**
3517 * Read guest RAM and ROM, signed 8-bit.
3518 *
3519 * @param SrcGCPhys The source address (guest physical).
3520 */
3521RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3522{
3523 int8_t val;
3524 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3525 VBOX_CHECK_ADDR(SrcGCPhys);
3526 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3527 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3528#ifdef VBOX_DEBUG_PHYS
3529 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3530#endif
3531 return val;
3532}
3533
3534
3535/**
3536 * Read guest RAM and ROM, unsigned 16-bit.
3537 *
3538 * @param SrcGCPhys The source address (guest physical).
3539 */
3540RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3541{
3542 uint16_t val;
3543 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3544 VBOX_CHECK_ADDR(SrcGCPhys);
3545 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3546 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3547#ifdef VBOX_DEBUG_PHYS
3548 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3549#endif
3550 return val;
3551}
3552
3553
3554/**
3555 * Read guest RAM and ROM, signed 16-bit.
3556 *
3557 * @param SrcGCPhys The source address (guest physical).
3558 */
3559RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3560{
3561 int16_t val;
3562 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3563 VBOX_CHECK_ADDR(SrcGCPhys);
3564 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3565 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3566#ifdef VBOX_DEBUG_PHYS
3567 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3568#endif
3569 return val;
3570}
3571
3572
3573/**
3574 * Read guest RAM and ROM, unsigned 32-bit.
3575 *
3576 * @param SrcGCPhys The source address (guest physical).
3577 */
3578RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3579{
3580 uint32_t val;
3581 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3582 VBOX_CHECK_ADDR(SrcGCPhys);
3583 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3584 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3585#ifdef VBOX_DEBUG_PHYS
3586 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3587#endif
3588 return val;
3589}
3590
3591
3592/**
3593 * Read guest RAM and ROM, signed 32-bit.
3594 *
3595 * @param SrcGCPhys The source address (guest physical).
3596 */
3597RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3598{
3599 int32_t val;
3600 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3601 VBOX_CHECK_ADDR(SrcGCPhys);
3602 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3603 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3604#ifdef VBOX_DEBUG_PHYS
3605 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3606#endif
3607 return val;
3608}
3609
3610
3611/**
3612 * Read guest RAM and ROM, unsigned 64-bit.
3613 *
3614 * @param SrcGCPhys The source address (guest physical).
3615 */
3616uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3617{
3618 uint64_t val;
3619 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3620 VBOX_CHECK_ADDR(SrcGCPhys);
3621 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3622 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3623#ifdef VBOX_DEBUG_PHYS
3624 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3625#endif
3626 return val;
3627}
3628
3629
3630/**
3631 * Read guest RAM and ROM, signed 64-bit.
3632 *
3633 * @param SrcGCPhys The source address (guest physical).
3634 */
3635int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3636{
3637 int64_t val;
3638 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3639 VBOX_CHECK_ADDR(SrcGCPhys);
3640 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3641 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3642#ifdef VBOX_DEBUG_PHYS
3643 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3644#endif
3645 return val;
3646}
3647
3648
/**
 * Write guest RAM.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   pvSrc           The source address.
 * @param   cb              Number of bytes to write
 */
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
#endif
}
3666
3667
/**
 * Write guest RAM, unsigned 8-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3684
3685
/**
 * Write guest RAM, unsigned 16-bit.
 * (Header previously said "8-bit" - copy/paste slip; the code writes a word.)
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3702
3703
/**
 * Write guest RAM, unsigned 32-bit.
 *
 * @param   DstGCPhys       The destination address (guest physical).
 * @param   val             Value
 */
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
{
    STAM_PROFILE_ADV_START(&gStatMemWrite, a);
    VBOX_CHECK_ADDR(DstGCPhys);
    PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
    STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
#ifdef VBOX_DEBUG_PHYS
    LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
#endif
}
3720
3721
3722/**
3723 * Write guest RAM, unsigned 64-bit.
3724 *
3725 * @param DstGCPhys The destination address (guest physical).
3726 * @param val Value
3727 */
3728void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3729{
3730 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3731 VBOX_CHECK_ADDR(DstGCPhys);
3732 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3733 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3734#ifdef VBOX_DEBUG_PHYS
3735 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3736#endif
3737}
3738
3739#undef LOG_GROUP
3740#define LOG_GROUP LOG_GROUP_REM_MMIO
3741
3742/** Read MMIO memory. */
3743static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3744{
3745 uint32_t u32 = 0;
3746 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3747 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3748 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3749 return u32;
3750}
3751
3752/** Read MMIO memory. */
3753static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3754{
3755 uint32_t u32 = 0;
3756 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3757 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3758 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3759 return u32;
3760}
3761
3762/** Read MMIO memory. */
3763static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3764{
3765 uint32_t u32 = 0;
3766 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3767 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3768 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3769 return u32;
3770}
3771
3772/** Write to MMIO memory. */
3773static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3774{
3775 int rc;
3776 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3777 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3778 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3779}
3780
3781/** Write to MMIO memory. */
3782static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3783{
3784 int rc;
3785 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3786 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3787 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3788}
3789
3790/** Write to MMIO memory. */
3791static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3792{
3793 int rc;
3794 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3795 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3796 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3797}
3798
3799
3800#undef LOG_GROUP
3801#define LOG_GROUP LOG_GROUP_REM_HANDLER
3802
3803/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3804
3805static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3806{
3807 uint8_t u8;
3808 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3809 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3810 return u8;
3811}
3812
3813static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3814{
3815 uint16_t u16;
3816 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3817 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3818 return u16;
3819}
3820
3821static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3822{
3823 uint32_t u32;
3824 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3825 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3826 return u32;
3827}
3828
3829static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3830{
3831 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3832 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
3833}
3834
3835static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3836{
3837 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3838 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
3839}
3840
3841static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3842{
3843 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3844 PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
3845}
3846
3847/* -+- disassembly -+- */
3848
3849#undef LOG_GROUP
3850#define LOG_GROUP LOG_GROUP_REM_DISAS
3851
3852
3853/**
3854 * Enables or disables singled stepped disassembly.
3855 *
3856 * @returns VBox status code.
3857 * @param pVM VM handle.
3858 * @param fEnable To enable set this flag, to disable clear it.
3859 */
3860static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3861{
3862 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3863 VM_ASSERT_EMT(pVM);
3864
3865 if (fEnable)
3866 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3867 else
3868 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3869#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3870 cpu_single_step(&pVM->rem.s.Env, fEnable);
3871#endif
3872 return VINF_SUCCESS;
3873}
3874
3875
3876/**
3877 * Enables or disables singled stepped disassembly.
3878 *
3879 * @returns VBox status code.
3880 * @param pVM VM handle.
3881 * @param fEnable To enable set this flag, to disable clear it.
3882 */
3883REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3884{
3885 int rc;
3886
3887 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3888 if (VM_IS_EMT(pVM))
3889 return remR3DisasEnableStepping(pVM, fEnable);
3890
3891 rc = VMR3ReqCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3892 AssertRC(rc);
3893 return rc;
3894}
3895
3896
3897#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3898/**
3899 * External Debugger Command: .remstep [on|off|1|0]
3900 */
3901static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
3902{
3903 int rc;
3904
3905 if (cArgs == 0)
3906 /*
3907 * Print the current status.
3908 */
3909 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
3910 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3911 else
3912 {
3913 /*
3914 * Convert the argument and change the mode.
3915 */
3916 bool fEnable;
3917 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3918 if (RT_SUCCESS(rc))
3919 {
3920 rc = REMR3DisasEnableStepping(pVM, fEnable);
3921 if (RT_SUCCESS(rc))
3922 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
3923 else
3924 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
3925 }
3926 else
3927 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
3928 }
3929 return rc;
3930}
3931#endif /* VBOX_WITH_DEBUGGER && !win.amd64 */
3932
3933
3934/**
3935 * Disassembles one instruction and prints it to the log.
3936 *
3937 * @returns Success indicator.
3938 * @param env Pointer to the recompiler CPU structure.
3939 * @param f32BitCode Indicates that whether or not the code should
3940 * be disassembled as 16 or 32 bit. If -1 the CS
3941 * selector will be inspected.
3942 * @param pszPrefix
3943 */
3944bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
3945{
3946 PVM pVM = env->pVM;
3947 const bool fLog = LogIsEnabled();
3948 const bool fLog2 = LogIs2Enabled();
3949 int rc = VINF_SUCCESS;
3950
3951 /*
3952 * Don't bother if there ain't any log output to do.
3953 */
3954 if (!fLog && !fLog2)
3955 return true;
3956
3957 /*
3958 * Update the state so DBGF reads the correct register values.
3959 */
3960 remR3StateUpdate(pVM, env->pVCpu);
3961
3962 /*
3963 * Log registers if requested.
3964 */
3965 if (fLog2)
3966 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3967
3968 /*
3969 * Disassemble to log.
3970 */
3971 if (fLog)
3972 {
3973 PVMCPU pVCpu = VMMGetCpu(pVM);
3974 char szBuf[256];
3975 szBuf[0] = '\0';
3976 int rc = DBGFR3DisasInstrEx(pVCpu->pVMR3,
3977 pVCpu->idCpu,
3978 0, /* Sel */
3979 0, /* GCPtr */
3980 DBGF_DISAS_FLAGS_CURRENT_GUEST
3981 | DBGF_DISAS_FLAGS_DEFAULT_MODE
3982 | DBGF_DISAS_FLAGS_HID_SEL_REGS_VALID,
3983 szBuf,
3984 sizeof(szBuf),
3985 NULL);
3986 if (RT_FAILURE(rc))
3987 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
3988 if (pszPrefix && *pszPrefix)
3989 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
3990 else
3991 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
3992 }
3993
3994 return RT_SUCCESS(rc);
3995}
3996
3997
3998/**
3999 * Disassemble recompiled code.
4000 *
4001 * @param phFileIgnored Ignored, logfile usually.
4002 * @param pvCode Pointer to the code block.
4003 * @param cb Size of the code block.
4004 */
4005void disas(FILE *phFile, void *pvCode, unsigned long cb)
4006{
4007 if (LogIs2Enabled())
4008 {
4009 unsigned off = 0;
4010 char szOutput[256];
4011 DISCPUSTATE Cpu;
4012
4013 memset(&Cpu, 0, sizeof(Cpu));
4014#ifdef RT_ARCH_X86
4015 Cpu.mode = CPUMODE_32BIT;
4016#else
4017 Cpu.mode = CPUMODE_64BIT;
4018#endif
4019
4020 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4021 while (off < cb)
4022 {
4023 uint32_t cbInstr;
4024 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
4025 RTLogPrintf("%s", szOutput);
4026 else
4027 {
4028 RTLogPrintf("disas error\n");
4029 cbInstr = 1;
4030#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporting 64-bit code. */
4031 break;
4032#endif
4033 }
4034 off += cbInstr;
4035 }
4036 }
4037}
4038
4039
4040/**
4041 * Disassemble guest code.
4042 *
4043 * @param phFileIgnored Ignored, logfile usually.
4044 * @param uCode The guest address of the code to disassemble. (flat?)
4045 * @param cb Number of bytes to disassemble.
4046 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4047 */
4048void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
4049{
4050 if (LogIs2Enabled())
4051 {
4052 PVM pVM = cpu_single_env->pVM;
4053 PVMCPU pVCpu = cpu_single_env->pVCpu;
4054 RTSEL cs;
4055 RTGCUINTPTR eip;
4056
4057 Assert(pVCpu);
4058
4059 /*
4060 * Update the state so DBGF reads the correct register values (flags).
4061 */
4062 remR3StateUpdate(pVM, pVCpu);
4063
4064 /*
4065 * Do the disassembling.
4066 */
4067 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4068 cs = cpu_single_env->segs[R_CS].selector;
4069 eip = uCode - cpu_single_env->segs[R_CS].base;
4070 for (;;)
4071 {
4072 char szBuf[256];
4073 uint32_t cbInstr;
4074 int rc = DBGFR3DisasInstrEx(pVM,
4075 pVCpu->idCpu,
4076 cs,
4077 eip,
4078 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4079 szBuf, sizeof(szBuf),
4080 &cbInstr);
4081 if (RT_SUCCESS(rc))
4082 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4083 else
4084 {
4085 RTLogPrintf("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
4086 cbInstr = 1;
4087 }
4088
4089 /* next */
4090 if (cb <= cbInstr)
4091 break;
4092 cb -= cbInstr;
4093 uCode += cbInstr;
4094 eip += cbInstr;
4095 }
4096 }
4097}
4098
4099
4100/**
4101 * Looks up a guest symbol.
4102 *
4103 * @returns Pointer to symbol name. This is a static buffer.
4104 * @param orig_addr The address in question.
4105 */
4106const char *lookup_symbol(target_ulong orig_addr)
4107{
4108 PVM pVM = cpu_single_env->pVM;
4109 RTGCINTPTR off = 0;
4110 RTDBGSYMBOL Sym;
4111 DBGFADDRESS Addr;
4112
4113 int rc = DBGFR3AsSymbolByAddr(pVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM, &Addr, orig_addr), &off, &Sym, NULL /*phMod*/);
4114 if (RT_SUCCESS(rc))
4115 {
4116 static char szSym[sizeof(Sym.szName) + 48];
4117 if (!off)
4118 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4119 else if (off > 0)
4120 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4121 else
4122 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4123 return szSym;
4124 }
4125 return "<N/A>";
4126}
4127
4128
4129#undef LOG_GROUP
4130#define LOG_GROUP LOG_GROUP_REM
4131
4132
4133/* -+- FF notifications -+- */
4134
4135
4136/**
4137 * Notification about a pending interrupt.
4138 *
4139 * @param pVM VM Handle.
4140 * @param pVCpu VMCPU Handle.
4141 * @param u8Interrupt Interrupt
4142 * @thread The emulation thread.
4143 */
4144REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4145{
4146 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4147 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4148}
4149
4150/**
4151 * Notification about a pending interrupt.
4152 *
4153 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4154 * @param pVM VM Handle.
4155 * @param pVCpu VMCPU Handle.
4156 * @thread The emulation thread.
4157 */
4158REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4159{
4160 return pVM->rem.s.u32PendingInterrupt;
4161}
4162
4163/**
4164 * Notification about the interrupt FF being set.
4165 *
4166 * @param pVM VM Handle.
4167 * @param pVCpu VMCPU Handle.
4168 * @thread The emulation thread.
4169 */
4170REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4171{
4172#ifndef IEM_VERIFICATION_MODE
4173 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4174 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4175 if (pVM->rem.s.fInREM)
4176 {
4177 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4178 CPU_INTERRUPT_EXTERNAL_HARD);
4179 }
4180#endif
4181}
4182
4183
4184/**
4185 * Notification about the interrupt FF being set.
4186 *
4187 * @param pVM VM Handle.
4188 * @param pVCpu VMCPU Handle.
4189 * @thread Any.
4190 */
4191REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4192{
4193 LogFlow(("REMR3NotifyInterruptClear:\n"));
4194 if (pVM->rem.s.fInREM)
4195 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4196}
4197
4198
4199/**
4200 * Notification about pending timer(s).
4201 *
4202 * @param pVM VM Handle.
4203 * @param pVCpuDst The target cpu for this notification.
4204 * TM will not broadcast pending timer events, but use
4205 * a dedicated EMT for them. So, only interrupt REM
4206 * execution if the given CPU is executing in REM.
4207 * @thread Any.
4208 */
4209REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4210{
4211#ifndef IEM_VERIFICATION_MODE
4212#ifndef DEBUG_bird
4213 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4214#endif
4215 if (pVM->rem.s.fInREM)
4216 {
4217 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4218 {
4219 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4220 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4221 CPU_INTERRUPT_EXTERNAL_TIMER);
4222 }
4223 else
4224 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4225 }
4226 else
4227 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4228#endif
4229}
4230
4231
4232/**
4233 * Notification about pending DMA transfers.
4234 *
4235 * @param pVM VM Handle.
4236 * @thread Any.
4237 */
4238REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4239{
4240#ifndef IEM_VERIFICATION_MODE
4241 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4242 if (pVM->rem.s.fInREM)
4243 {
4244 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4245 CPU_INTERRUPT_EXTERNAL_DMA);
4246 }
4247#endif
4248}
4249
4250
4251/**
4252 * Notification about pending timer(s).
4253 *
4254 * @param pVM VM Handle.
4255 * @thread Any.
4256 */
4257REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4258{
4259#ifndef IEM_VERIFICATION_MODE
4260 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4261 if (pVM->rem.s.fInREM)
4262 {
4263 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4264 CPU_INTERRUPT_EXTERNAL_EXIT);
4265 }
4266#endif
4267}
4268
4269
4270/**
4271 * Notification about pending FF set by an external thread.
4272 *
4273 * @param pVM VM handle.
4274 * @thread Any.
4275 */
4276REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4277{
4278#ifndef IEM_VERIFICATION_MODE
4279 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4280 if (pVM->rem.s.fInREM)
4281 {
4282 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4283 CPU_INTERRUPT_EXTERNAL_EXIT);
4284 }
4285#endif
4286}
4287
4288
4289#ifdef VBOX_WITH_STATISTICS
4290void remR3ProfileStart(int statcode)
4291{
4292 STAMPROFILEADV *pStat;
4293 switch(statcode)
4294 {
4295 case STATS_EMULATE_SINGLE_INSTR:
4296 pStat = &gStatExecuteSingleInstr;
4297 break;
4298 case STATS_QEMU_COMPILATION:
4299 pStat = &gStatCompilationQEmu;
4300 break;
4301 case STATS_QEMU_RUN_EMULATED_CODE:
4302 pStat = &gStatRunCodeQEmu;
4303 break;
4304 case STATS_QEMU_TOTAL:
4305 pStat = &gStatTotalTimeQEmu;
4306 break;
4307 case STATS_QEMU_RUN_TIMERS:
4308 pStat = &gStatTimers;
4309 break;
4310 case STATS_TLB_LOOKUP:
4311 pStat= &gStatTBLookup;
4312 break;
4313 case STATS_IRQ_HANDLING:
4314 pStat= &gStatIRQ;
4315 break;
4316 case STATS_RAW_CHECK:
4317 pStat = &gStatRawCheck;
4318 break;
4319
4320 default:
4321 AssertMsgFailed(("unknown stat %d\n", statcode));
4322 return;
4323 }
4324 STAM_PROFILE_ADV_START(pStat, a);
4325}
4326
4327
4328void remR3ProfileStop(int statcode)
4329{
4330 STAMPROFILEADV *pStat;
4331 switch(statcode)
4332 {
4333 case STATS_EMULATE_SINGLE_INSTR:
4334 pStat = &gStatExecuteSingleInstr;
4335 break;
4336 case STATS_QEMU_COMPILATION:
4337 pStat = &gStatCompilationQEmu;
4338 break;
4339 case STATS_QEMU_RUN_EMULATED_CODE:
4340 pStat = &gStatRunCodeQEmu;
4341 break;
4342 case STATS_QEMU_TOTAL:
4343 pStat = &gStatTotalTimeQEmu;
4344 break;
4345 case STATS_QEMU_RUN_TIMERS:
4346 pStat = &gStatTimers;
4347 break;
4348 case STATS_TLB_LOOKUP:
4349 pStat= &gStatTBLookup;
4350 break;
4351 case STATS_IRQ_HANDLING:
4352 pStat= &gStatIRQ;
4353 break;
4354 case STATS_RAW_CHECK:
4355 pStat = &gStatRawCheck;
4356 break;
4357 default:
4358 AssertMsgFailed(("unknown stat %d\n", statcode));
4359 return;
4360 }
4361 STAM_PROFILE_ADV_STOP(pStat, a);
4362}
4363#endif
4364
4365/**
4366 * Raise an RC, force rem exit.
4367 *
4368 * @param pVM VM handle.
4369 * @param rc The rc.
4370 */
4371void remR3RaiseRC(PVM pVM, int rc)
4372{
4373 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4374 Assert(pVM->rem.s.fInREM);
4375 VM_ASSERT_EMT(pVM);
4376 pVM->rem.s.rc = rc;
4377 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4378}
4379
4380
4381/* -+- timers -+- */
4382
/** qemu callback: reads the virtual CPU's TSC via TM. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4388
4389
4390/* -+- interrupts -+- */
4391
/** qemu callback: asserts the FPU error line (ISA IRQ 13). */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4397
/**
 * qemu callback: fetches the next hardware interrupt vector.
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param   env     The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUX86State *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector stashed by REMR3NotifyPendingInterrupt(). */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
             u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
    if (RT_SUCCESS(rc))
    {
        /* Keep CPU_INTERRUPT_HARD up while more interrupts are pending. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4431
4432
4433/* -+- local apic -+- */
4434
4435#if 0 /* CPUMSetGuestMsr does this now. */
4436void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4437{
4438 int rc = PDMApicSetBase(env->pVM, val);
4439 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4440}
4441#endif
4442
4443uint64_t cpu_get_apic_base(CPUX86State *env)
4444{
4445 uint64_t u64;
4446 int rc = PDMApicGetBase(env->pVM, &u64);
4447 if (RT_SUCCESS(rc))
4448 {
4449 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4450 return u64;
4451 }
4452 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4453 return 0;
4454}
4455
/** qemu callback: writes the task priority register via PDM. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4461
4462uint8_t cpu_get_apic_tpr(CPUX86State *env)
4463{
4464 uint8_t u8;
4465 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4466 if (RT_SUCCESS(rc))
4467 {
4468 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4469 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4470 }
4471 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4472 return 0;
4473}
4474
4475/**
4476 * Read an MSR.
4477 *
4478 * @retval 0 success.
4479 * @retval -1 failure, raise \#GP(0).
4480 * @param env The cpu state.
4481 * @param idMsr The MSR to read.
4482 * @param puValue Where to return the value.
4483 */
4484int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4485{
4486 Assert(env->pVCpu);
4487 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4488}
4489
4490/**
4491 * Write to an MSR.
4492 *
4493 * @retval 0 success.
4494 * @retval -1 failure, raise \#GP(0).
4495 * @param env The cpu state.
4496 * @param idMsr The MSR to read.
4497 * @param puValue Where to return the value.
4498 */
4499int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4500{
4501 Assert(env->pVCpu);
4502 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4503}
4504
4505/* -+- I/O Ports -+- */
4506
4507#undef LOG_GROUP
4508#define LOG_GROUP LOG_GROUP_REM_IOPORT
4509
/**
 * qemu callback: 8-bit I/O port write via IOM.
 *
 * EM status codes force a REM exit through remR3RaiseRC(); any other failure
 * aborts the VM.
 */
void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
{
    int rc;

    /* Skip logging for the chatty POST/RTC/speaker ports. */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4528
/**
 * qemu callback: 16-bit I/O port write via IOM.
 *
 * EM status codes force a REM exit through remR3RaiseRC(); any other failure
 * aborts the VM.
 */
void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
{
    //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
    int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4543
/**
 * qemu callback: 32-bit I/O port write via IOM.
 *
 * EM status codes force a REM exit through remR3RaiseRC(); any other failure
 * aborts the VM.
 */
void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
{
    int rc;
    Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
    rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
4559
/**
 * qemu callback: 8-bit I/O port read via IOM.
 *
 * EM status codes force a REM exit through remR3RaiseRC(); any other failure
 * aborts the VM. Returns 0xff (floating bus) on the abort path.
 */
uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        /* Skip logging the chatty RTC data port. */
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (uint8_t)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint8_t)u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT8_C(0xff);
}
4579
/**
 * qemu callback: 16-bit I/O port read via IOM.
 *
 * EM status codes force a REM exit through remR3RaiseRC(); any other failure
 * aborts the VM. Returns 0xffff (floating bus) on the abort path.
 */
uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
        return (uint16_t)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (uint16_t)u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT16_C(0xffff);
}
4598
/**
 * qemu callback: 32-bit I/O port read via IOM.
 *
 * EM status codes force a REM exit through remR3RaiseRC(); any other failure
 * aborts the VM. Returns 0xffffffff (floating bus) on the abort path.
 */
uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
//if (addr==0x01f0 && u32 == 0x6b6d)
//      loglevel = ~0;
        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
        return u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return u32;
    }
    remAbort(rc, __FUNCTION__);
    return UINT32_C(0xffffffff);
}
4619
4620#undef LOG_GROUP
4621#define LOG_GROUP LOG_GROUP_REM
4622
4623
4624/* -+- helpers and misc other interfaces -+- */
4625
4626/**
4627 * Perform the CPUID instruction.
4628 *
4629 * @param env Pointer to the recompiler CPU structure.
4630 * @param idx The CPUID leaf (eax).
4631 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4632 * @param pvEAX Where to store eax.
4633 * @param pvEBX Where to store ebx.
4634 * @param pvECX Where to store ecx.
4635 * @param pvEDX Where to store edx.
4636 */
4637void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4638 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4639{
4640 NOREF(idxSub);
4641 CPUMGetGuestCpuId(env->pVCpu, idx, pEAX, pEBX, pECX, pEDX);
4642}
4643
4644
4645#if 0 /* not used */
4646/**
4647 * Interface for qemu hardware to report back fatal errors.
4648 */
4649void hw_error(const char *pszFormat, ...)
4650{
4651 /*
4652 * Bitch about it.
4653 */
4654 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4655 * this in my Odin32 tree at home! */
4656 va_list args;
4657 va_start(args, pszFormat);
4658 RTLogPrintf("fatal error in virtual hardware:");
4659 RTLogPrintfV(pszFormat, args);
4660 va_end(args);
4661 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4662
4663 /*
4664 * If we're in REM context we'll sync back the state before 'jumping' to
4665 * the EMs failure handling.
4666 */
4667 PVM pVM = cpu_single_env->pVM;
4668 if (pVM->rem.s.fInREM)
4669 REMR3StateBack(pVM);
4670 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4671 AssertMsgFailed(("EMR3FatalError returned!\n"));
4672}
4673#endif
4674
4675/**
4676 * Interface for the qemu cpu to report unhandled situation
4677 * raising a fatal VM error.
4678 */
4679void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4680{
4681 va_list va;
4682 PVM pVM;
4683 PVMCPU pVCpu;
4684 char szMsg[256];
4685
4686 /*
4687 * Bitch about it.
4688 */
4689 RTLogFlags(NULL, "nodisabled nobuffered");
4690 RTLogFlush(NULL);
4691
4692 va_start(va, pszFormat);
4693#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4694 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4695 unsigned cArgs = 0;
4696 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4697 const char *psz = strchr(pszFormat, '%');
4698 while (psz && cArgs < 6)
4699 {
4700 auArgs[cArgs++] = va_arg(va, uintptr_t);
4701 psz = strchr(psz + 1, '%');
4702 }
4703 switch (cArgs)
4704 {
4705 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4706 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4707 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4708 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4709 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4710 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4711 default:
4712 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4713 }
4714#else
4715 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4716#endif
4717 va_end(va);
4718
4719 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4720 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4721
4722 /*
4723 * If we're in REM context we'll sync back the state before 'jumping' to
4724 * the EMs failure handling.
4725 */
4726 pVM = cpu_single_env->pVM;
4727 pVCpu = cpu_single_env->pVCpu;
4728 Assert(pVCpu);
4729
4730 if (pVM->rem.s.fInREM)
4731 REMR3StateBack(pVM, pVCpu);
4732 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4733 AssertMsgFailed(("EMR3FatalError returned!\n"));
4734}
4735
4736
4737/**
4738 * Aborts the VM.
4739 *
4740 * @param rc VBox error code.
4741 * @param pszTip Hint about why/when this happened.
4742 */
4743void remAbort(int rc, const char *pszTip)
4744{
4745 PVM pVM;
4746 PVMCPU pVCpu;
4747
4748 /*
4749 * Bitch about it.
4750 */
4751 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4752 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4753
4754 /*
4755 * Jump back to where we entered the recompiler.
4756 */
4757 pVM = cpu_single_env->pVM;
4758 pVCpu = cpu_single_env->pVCpu;
4759 Assert(pVCpu);
4760
4761 if (pVM->rem.s.fInREM)
4762 REMR3StateBack(pVM, pVCpu);
4763
4764 EMR3FatalError(pVCpu, rc);
4765 AssertMsgFailed(("EMR3FatalError returned!\n"));
4766}
4767
4768
4769/**
4770 * Dumps a linux system call.
4771 * @param pVCpu VMCPU handle.
4772 */
4773void remR3DumpLnxSyscall(PVMCPU pVCpu)
4774{
4775 static const char *apsz[] =
4776 {
4777 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4778 "sys_exit",
4779 "sys_fork",
4780 "sys_read",
4781 "sys_write",
4782 "sys_open", /* 5 */
4783 "sys_close",
4784 "sys_waitpid",
4785 "sys_creat",
4786 "sys_link",
4787 "sys_unlink", /* 10 */
4788 "sys_execve",
4789 "sys_chdir",
4790 "sys_time",
4791 "sys_mknod",
4792 "sys_chmod", /* 15 */
4793 "sys_lchown16",
4794 "sys_ni_syscall", /* old break syscall holder */
4795 "sys_stat",
4796 "sys_lseek",
4797 "sys_getpid", /* 20 */
4798 "sys_mount",
4799 "sys_oldumount",
4800 "sys_setuid16",
4801 "sys_getuid16",
4802 "sys_stime", /* 25 */
4803 "sys_ptrace",
4804 "sys_alarm",
4805 "sys_fstat",
4806 "sys_pause",
4807 "sys_utime", /* 30 */
4808 "sys_ni_syscall", /* old stty syscall holder */
4809 "sys_ni_syscall", /* old gtty syscall holder */
4810 "sys_access",
4811 "sys_nice",
4812 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4813 "sys_sync",
4814 "sys_kill",
4815 "sys_rename",
4816 "sys_mkdir",
4817 "sys_rmdir", /* 40 */
4818 "sys_dup",
4819 "sys_pipe",
4820 "sys_times",
4821 "sys_ni_syscall", /* old prof syscall holder */
4822 "sys_brk", /* 45 */
4823 "sys_setgid16",
4824 "sys_getgid16",
4825 "sys_signal",
4826 "sys_geteuid16",
4827 "sys_getegid16", /* 50 */
4828 "sys_acct",
4829 "sys_umount", /* recycled never used phys() */
4830 "sys_ni_syscall", /* old lock syscall holder */
4831 "sys_ioctl",
4832 "sys_fcntl", /* 55 */
4833 "sys_ni_syscall", /* old mpx syscall holder */
4834 "sys_setpgid",
4835 "sys_ni_syscall", /* old ulimit syscall holder */
4836 "sys_olduname",
4837 "sys_umask", /* 60 */
4838 "sys_chroot",
4839 "sys_ustat",
4840 "sys_dup2",
4841 "sys_getppid",
4842 "sys_getpgrp", /* 65 */
4843 "sys_setsid",
4844 "sys_sigaction",
4845 "sys_sgetmask",
4846 "sys_ssetmask",
4847 "sys_setreuid16", /* 70 */
4848 "sys_setregid16",
4849 "sys_sigsuspend",
4850 "sys_sigpending",
4851 "sys_sethostname",
4852 "sys_setrlimit", /* 75 */
4853 "sys_old_getrlimit",
4854 "sys_getrusage",
4855 "sys_gettimeofday",
4856 "sys_settimeofday",
4857 "sys_getgroups16", /* 80 */
4858 "sys_setgroups16",
4859 "old_select",
4860 "sys_symlink",
4861 "sys_lstat",
4862 "sys_readlink", /* 85 */
4863 "sys_uselib",
4864 "sys_swapon",
4865 "sys_reboot",
4866 "old_readdir",
4867 "old_mmap", /* 90 */
4868 "sys_munmap",
4869 "sys_truncate",
4870 "sys_ftruncate",
4871 "sys_fchmod",
4872 "sys_fchown16", /* 95 */
4873 "sys_getpriority",
4874 "sys_setpriority",
4875 "sys_ni_syscall", /* old profil syscall holder */
4876 "sys_statfs",
4877 "sys_fstatfs", /* 100 */
4878 "sys_ioperm",
4879 "sys_socketcall",
4880 "sys_syslog",
4881 "sys_setitimer",
4882 "sys_getitimer", /* 105 */
4883 "sys_newstat",
4884 "sys_newlstat",
4885 "sys_newfstat",
4886 "sys_uname",
4887 "sys_iopl", /* 110 */
4888 "sys_vhangup",
4889 "sys_ni_syscall", /* old "idle" system call */
4890 "sys_vm86old",
4891 "sys_wait4",
4892 "sys_swapoff", /* 115 */
4893 "sys_sysinfo",
4894 "sys_ipc",
4895 "sys_fsync",
4896 "sys_sigreturn",
4897 "sys_clone", /* 120 */
4898 "sys_setdomainname",
4899 "sys_newuname",
4900 "sys_modify_ldt",
4901 "sys_adjtimex",
4902 "sys_mprotect", /* 125 */
4903 "sys_sigprocmask",
4904 "sys_ni_syscall", /* old "create_module" */
4905 "sys_init_module",
4906 "sys_delete_module",
4907 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4908 "sys_quotactl",
4909 "sys_getpgid",
4910 "sys_fchdir",
4911 "sys_bdflush",
4912 "sys_sysfs", /* 135 */
4913 "sys_personality",
4914 "sys_ni_syscall", /* reserved for afs_syscall */
4915 "sys_setfsuid16",
4916 "sys_setfsgid16",
4917 "sys_llseek", /* 140 */
4918 "sys_getdents",
4919 "sys_select",
4920 "sys_flock",
4921 "sys_msync",
4922 "sys_readv", /* 145 */
4923 "sys_writev",
4924 "sys_getsid",
4925 "sys_fdatasync",
4926 "sys_sysctl",
4927 "sys_mlock", /* 150 */
4928 "sys_munlock",
4929 "sys_mlockall",
4930 "sys_munlockall",
4931 "sys_sched_setparam",
4932 "sys_sched_getparam", /* 155 */
4933 "sys_sched_setscheduler",
4934 "sys_sched_getscheduler",
4935 "sys_sched_yield",
4936 "sys_sched_get_priority_max",
4937 "sys_sched_get_priority_min", /* 160 */
4938 "sys_sched_rr_get_interval",
4939 "sys_nanosleep",
4940 "sys_mremap",
4941 "sys_setresuid16",
4942 "sys_getresuid16", /* 165 */
4943 "sys_vm86",
4944 "sys_ni_syscall", /* Old sys_query_module */
4945 "sys_poll",
4946 "sys_nfsservctl",
4947 "sys_setresgid16", /* 170 */
4948 "sys_getresgid16",
4949 "sys_prctl",
4950 "sys_rt_sigreturn",
4951 "sys_rt_sigaction",
4952 "sys_rt_sigprocmask", /* 175 */
4953 "sys_rt_sigpending",
4954 "sys_rt_sigtimedwait",
4955 "sys_rt_sigqueueinfo",
4956 "sys_rt_sigsuspend",
4957 "sys_pread64", /* 180 */
4958 "sys_pwrite64",
4959 "sys_chown16",
4960 "sys_getcwd",
4961 "sys_capget",
4962 "sys_capset", /* 185 */
4963 "sys_sigaltstack",
4964 "sys_sendfile",
4965 "sys_ni_syscall", /* reserved for streams1 */
4966 "sys_ni_syscall", /* reserved for streams2 */
4967 "sys_vfork", /* 190 */
4968 "sys_getrlimit",
4969 "sys_mmap2",
4970 "sys_truncate64",
4971 "sys_ftruncate64",
4972 "sys_stat64", /* 195 */
4973 "sys_lstat64",
4974 "sys_fstat64",
4975 "sys_lchown",
4976 "sys_getuid",
4977 "sys_getgid", /* 200 */
4978 "sys_geteuid",
4979 "sys_getegid",
4980 "sys_setreuid",
4981 "sys_setregid",
4982 "sys_getgroups", /* 205 */
4983 "sys_setgroups",
4984 "sys_fchown",
4985 "sys_setresuid",
4986 "sys_getresuid",
4987 "sys_setresgid", /* 210 */
4988 "sys_getresgid",
4989 "sys_chown",
4990 "sys_setuid",
4991 "sys_setgid",
4992 "sys_setfsuid", /* 215 */
4993 "sys_setfsgid",
4994 "sys_pivot_root",
4995 "sys_mincore",
4996 "sys_madvise",
4997 "sys_getdents64", /* 220 */
4998 "sys_fcntl64",
4999 "sys_ni_syscall", /* reserved for TUX */
5000 "sys_ni_syscall",
5001 "sys_gettid",
5002 "sys_readahead", /* 225 */
5003 "sys_setxattr",
5004 "sys_lsetxattr",
5005 "sys_fsetxattr",
5006 "sys_getxattr",
5007 "sys_lgetxattr", /* 230 */
5008 "sys_fgetxattr",
5009 "sys_listxattr",
5010 "sys_llistxattr",
5011 "sys_flistxattr",
5012 "sys_removexattr", /* 235 */
5013 "sys_lremovexattr",
5014 "sys_fremovexattr",
5015 "sys_tkill",
5016 "sys_sendfile64",
5017 "sys_futex", /* 240 */
5018 "sys_sched_setaffinity",
5019 "sys_sched_getaffinity",
5020 "sys_set_thread_area",
5021 "sys_get_thread_area",
5022 "sys_io_setup", /* 245 */
5023 "sys_io_destroy",
5024 "sys_io_getevents",
5025 "sys_io_submit",
5026 "sys_io_cancel",
5027 "sys_fadvise64", /* 250 */
5028 "sys_ni_syscall",
5029 "sys_exit_group",
5030 "sys_lookup_dcookie",
5031 "sys_epoll_create",
5032 "sys_epoll_ctl", /* 255 */
5033 "sys_epoll_wait",
5034 "sys_remap_file_pages",
5035 "sys_set_tid_address",
5036 "sys_timer_create",
5037 "sys_timer_settime", /* 260 */
5038 "sys_timer_gettime",
5039 "sys_timer_getoverrun",
5040 "sys_timer_delete",
5041 "sys_clock_settime",
5042 "sys_clock_gettime", /* 265 */
5043 "sys_clock_getres",
5044 "sys_clock_nanosleep",
5045 "sys_statfs64",
5046 "sys_fstatfs64",
5047 "sys_tgkill", /* 270 */
5048 "sys_utimes",
5049 "sys_fadvise64_64",
5050 "sys_ni_syscall" /* sys_vserver */
5051 };
5052
5053 uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
5054 switch (uEAX)
5055 {
5056 default:
5057 if (uEAX < RT_ELEMENTS(apsz))
5058 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
5059 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
5060 CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
5061 else
5062 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
5063 break;
5064
5065 }
5066}
5067
5068
5069/**
5070 * Dumps an OpenBSD system call.
5071 * @param pVCpu VMCPU handle.
5072 */
5073void remR3DumpOBsdSyscall(PVMCPU pVCpu)
5074{
5075 static const char *apsz[] =
5076 {
5077 "SYS_syscall", //0
5078 "SYS_exit", //1
5079 "SYS_fork", //2
5080 "SYS_read", //3
5081 "SYS_write", //4
5082 "SYS_open", //5
5083 "SYS_close", //6
5084 "SYS_wait4", //7
5085 "SYS_8",
5086 "SYS_link", //9
5087 "SYS_unlink", //10
5088 "SYS_11",
5089 "SYS_chdir", //12
5090 "SYS_fchdir", //13
5091 "SYS_mknod", //14
5092 "SYS_chmod", //15
5093 "SYS_chown", //16
5094 "SYS_break", //17
5095 "SYS_18",
5096 "SYS_19",
5097 "SYS_getpid", //20
5098 "SYS_mount", //21
5099 "SYS_unmount", //22
5100 "SYS_setuid", //23
5101 "SYS_getuid", //24
5102 "SYS_geteuid", //25
5103 "SYS_ptrace", //26
5104 "SYS_recvmsg", //27
5105 "SYS_sendmsg", //28
5106 "SYS_recvfrom", //29
5107 "SYS_accept", //30
5108 "SYS_getpeername", //31
5109 "SYS_getsockname", //32
5110 "SYS_access", //33
5111 "SYS_chflags", //34
5112 "SYS_fchflags", //35
5113 "SYS_sync", //36
5114 "SYS_kill", //37
5115 "SYS_38",
5116 "SYS_getppid", //39
5117 "SYS_40",
5118 "SYS_dup", //41
5119 "SYS_opipe", //42
5120 "SYS_getegid", //43
5121 "SYS_profil", //44
5122 "SYS_ktrace", //45
5123 "SYS_sigaction", //46
5124 "SYS_getgid", //47
5125 "SYS_sigprocmask", //48
5126 "SYS_getlogin", //49
5127 "SYS_setlogin", //50
5128 "SYS_acct", //51
5129 "SYS_sigpending", //52
5130 "SYS_osigaltstack", //53
5131 "SYS_ioctl", //54
5132 "SYS_reboot", //55
5133 "SYS_revoke", //56
5134 "SYS_symlink", //57
5135 "SYS_readlink", //58
5136 "SYS_execve", //59
5137 "SYS_umask", //60
5138 "SYS_chroot", //61
5139 "SYS_62",
5140 "SYS_63",
5141 "SYS_64",
5142 "SYS_65",
5143 "SYS_vfork", //66
5144 "SYS_67",
5145 "SYS_68",
5146 "SYS_sbrk", //69
5147 "SYS_sstk", //70
5148 "SYS_61",
5149 "SYS_vadvise", //72
5150 "SYS_munmap", //73
5151 "SYS_mprotect", //74
5152 "SYS_madvise", //75
5153 "SYS_76",
5154 "SYS_77",
5155 "SYS_mincore", //78
5156 "SYS_getgroups", //79
5157 "SYS_setgroups", //80
5158 "SYS_getpgrp", //81
5159 "SYS_setpgid", //82
5160 "SYS_setitimer", //83
5161 "SYS_84",
5162 "SYS_85",
5163 "SYS_getitimer", //86
5164 "SYS_87",
5165 "SYS_88",
5166 "SYS_89",
5167 "SYS_dup2", //90
5168 "SYS_91",
5169 "SYS_fcntl", //92
5170 "SYS_select", //93
5171 "SYS_94",
5172 "SYS_fsync", //95
5173 "SYS_setpriority", //96
5174 "SYS_socket", //97
5175 "SYS_connect", //98
5176 "SYS_99",
5177 "SYS_getpriority", //100
5178 "SYS_101",
5179 "SYS_102",
5180 "SYS_sigreturn", //103
5181 "SYS_bind", //104
5182 "SYS_setsockopt", //105
5183 "SYS_listen", //106
5184 "SYS_107",
5185 "SYS_108",
5186 "SYS_109",
5187 "SYS_110",
5188 "SYS_sigsuspend", //111
5189 "SYS_112",
5190 "SYS_113",
5191 "SYS_114",
5192 "SYS_115",
5193 "SYS_gettimeofday", //116
5194 "SYS_getrusage", //117
5195 "SYS_getsockopt", //118
5196 "SYS_119",
5197 "SYS_readv", //120
5198 "SYS_writev", //121
5199 "SYS_settimeofday", //122
5200 "SYS_fchown", //123
5201 "SYS_fchmod", //124
5202 "SYS_125",
5203 "SYS_setreuid", //126
5204 "SYS_setregid", //127
5205 "SYS_rename", //128
5206 "SYS_129",
5207 "SYS_130",
5208 "SYS_flock", //131
5209 "SYS_mkfifo", //132
5210 "SYS_sendto", //133
5211 "SYS_shutdown", //134
5212 "SYS_socketpair", //135
5213 "SYS_mkdir", //136
5214 "SYS_rmdir", //137
5215 "SYS_utimes", //138
5216 "SYS_139",
5217 "SYS_adjtime", //140
5218 "SYS_141",
5219 "SYS_142",
5220 "SYS_143",
5221 "SYS_144",
5222 "SYS_145",
5223 "SYS_146",
5224 "SYS_setsid", //147
5225 "SYS_quotactl", //148
5226 "SYS_149",
5227 "SYS_150",
5228 "SYS_151",
5229 "SYS_152",
5230 "SYS_153",
5231 "SYS_154",
5232 "SYS_nfssvc", //155
5233 "SYS_156",
5234 "SYS_157",
5235 "SYS_158",
5236 "SYS_159",
5237 "SYS_160",
5238 "SYS_getfh", //161
5239 "SYS_162",
5240 "SYS_163",
5241 "SYS_164",
5242 "SYS_sysarch", //165
5243 "SYS_166",
5244 "SYS_167",
5245 "SYS_168",
5246 "SYS_169",
5247 "SYS_170",
5248 "SYS_171",
5249 "SYS_172",
5250 "SYS_pread", //173
5251 "SYS_pwrite", //174
5252 "SYS_175",
5253 "SYS_176",
5254 "SYS_177",
5255 "SYS_178",
5256 "SYS_179",
5257 "SYS_180",
5258 "SYS_setgid", //181
5259 "SYS_setegid", //182
5260 "SYS_seteuid", //183
5261 "SYS_lfs_bmapv", //184
5262 "SYS_lfs_markv", //185
5263 "SYS_lfs_segclean", //186
5264 "SYS_lfs_segwait", //187
5265 "SYS_188",
5266 "SYS_189",
5267 "SYS_190",
5268 "SYS_pathconf", //191
5269 "SYS_fpathconf", //192
5270 "SYS_swapctl", //193
5271 "SYS_getrlimit", //194
5272 "SYS_setrlimit", //195
5273 "SYS_getdirentries", //196
5274 "SYS_mmap", //197
5275 "SYS___syscall", //198
5276 "SYS_lseek", //199
5277 "SYS_truncate", //200
5278 "SYS_ftruncate", //201
5279 "SYS___sysctl", //202
5280 "SYS_mlock", //203
5281 "SYS_munlock", //204
5282 "SYS_205",
5283 "SYS_futimes", //206
5284 "SYS_getpgid", //207
5285 "SYS_xfspioctl", //208
5286 "SYS_209",
5287 "SYS_210",
5288 "SYS_211",
5289 "SYS_212",
5290 "SYS_213",
5291 "SYS_214",
5292 "SYS_215",
5293 "SYS_216",
5294 "SYS_217",
5295 "SYS_218",
5296 "SYS_219",
5297 "SYS_220",
5298 "SYS_semget", //221
5299 "SYS_222",
5300 "SYS_223",
5301 "SYS_224",
5302 "SYS_msgget", //225
5303 "SYS_msgsnd", //226
5304 "SYS_msgrcv", //227
5305 "SYS_shmat", //228
5306 "SYS_229",
5307 "SYS_shmdt", //230
5308 "SYS_231",
5309 "SYS_clock_gettime", //232
5310 "SYS_clock_settime", //233
5311 "SYS_clock_getres", //234
5312 "SYS_235",
5313 "SYS_236",
5314 "SYS_237",
5315 "SYS_238",
5316 "SYS_239",
5317 "SYS_nanosleep", //240
5318 "SYS_241",
5319 "SYS_242",
5320 "SYS_243",
5321 "SYS_244",
5322 "SYS_245",
5323 "SYS_246",
5324 "SYS_247",
5325 "SYS_248",
5326 "SYS_249",
5327 "SYS_minherit", //250
5328 "SYS_rfork", //251
5329 "SYS_poll", //252
5330 "SYS_issetugid", //253
5331 "SYS_lchown", //254
5332 "SYS_getsid", //255
5333 "SYS_msync", //256
5334 "SYS_257",
5335 "SYS_258",
5336 "SYS_259",
5337 "SYS_getfsstat", //260
5338 "SYS_statfs", //261
5339 "SYS_fstatfs", //262
5340 "SYS_pipe", //263
5341 "SYS_fhopen", //264
5342 "SYS_265",
5343 "SYS_fhstatfs", //266
5344 "SYS_preadv", //267
5345 "SYS_pwritev", //268
5346 "SYS_kqueue", //269
5347 "SYS_kevent", //270
5348 "SYS_mlockall", //271
5349 "SYS_munlockall", //272
5350 "SYS_getpeereid", //273
5351 "SYS_274",
5352 "SYS_275",
5353 "SYS_276",
5354 "SYS_277",
5355 "SYS_278",
5356 "SYS_279",
5357 "SYS_280",
5358 "SYS_getresuid", //281
5359 "SYS_setresuid", //282
5360 "SYS_getresgid", //283
5361 "SYS_setresgid", //284
5362 "SYS_285",
5363 "SYS_mquery", //286
5364 "SYS_closefrom", //287
5365 "SYS_sigaltstack", //288
5366 "SYS_shmget", //289
5367 "SYS_semop", //290
5368 "SYS_stat", //291
5369 "SYS_fstat", //292
5370 "SYS_lstat", //293
5371 "SYS_fhstat", //294
5372 "SYS___semctl", //295
5373 "SYS_shmctl", //296
5374 "SYS_msgctl", //297
5375 "SYS_MAXSYSCALL", //298
5376 //299
5377 //300
5378 };
5379 uint32_t uEAX;
5380 if (!LogIsEnabled())
5381 return;
5382 uEAX = CPUMGetGuestEAX(pVCpu);
5383 switch (uEAX)
5384 {
5385 default:
5386 if (uEAX < RT_ELEMENTS(apsz))
5387 {
5388 uint32_t au32Args[8] = {0};
5389 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5390 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5391 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5392 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5393 }
5394 else
5395 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5396 break;
5397 }
5398}
5399
5400
5401#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5402/**
5403 * The Dll main entry point (stub).
5404 */
5405bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5406{
5407 return true;
5408}
5409
/**
 * Minimal memcpy replacement for the no-CRT Windows x86 build.
 *
 * Plain byte-by-byte forward copy; as with the standard memcpy the source
 * and destination regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer (read only).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = dst;
    const uint8_t *pbSrc = src; /* keep const: the original dropped the qualifier from 'src' */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5417
5418#endif
5419
/**
 * QEMU hook presumably invoked when the CPU's system management mode (SMM)
 * state may have changed (NOTE(review): inferred from the name and the QEMU
 * CPUX86State parameter -- confirm against the recompiler's cpu.h).
 *
 * Intentionally a no-op in the VBox recompiler.
 *
 * @param   env     Recompiler CPU state (unused).
 */
void cpu_smm_update(CPUX86State *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette