VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 17035

Last change on this file since 17035 was 17035, checked in by vboxsync, 16 years ago

VMM,REM: Brushed up the TR/TSS shadowing. We're now relying on the hidden TR registers in SELM and CPUM/REM will make sure these are always in sync. Joined CPUMGetGuestTRHid and CPUMGetGuestTR. Kicked out sync_tr (unused now) and SELMGCGetRing1Stack.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 156.7 KB
Line 
1/* $Id: VBoxRecompiler.c 17035 2009-02-23 22:26:39Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33void cpu_exec_init_all(unsigned long tb_size);
34
35#include <VBox/rem.h>
36#include <VBox/vmapi.h>
37#include <VBox/tm.h>
38#include <VBox/ssm.h>
39#include <VBox/em.h>
40#include <VBox/trpm.h>
41#include <VBox/iom.h>
42#include <VBox/mm.h>
43#include <VBox/pgm.h>
44#include <VBox/pdm.h>
45#include <VBox/dbgf.h>
46#include <VBox/dbg.h>
47#include <VBox/hwaccm.h>
48#include <VBox/patm.h>
49#include <VBox/csam.h>
50#include "REMInternal.h"
51#include <VBox/vm.h>
52#include <VBox/param.h>
53#include <VBox/err.h>
54
55#include <VBox/log.h>
56#include <iprt/semaphore.h>
57#include <iprt/asm.h>
58#include <iprt/assert.h>
59#include <iprt/thread.h>
60#include <iprt/string.h>
61
62/* Don't wanna include everything. */
63extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
64extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
65extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
66extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
67extern void tlb_flush(CPUState *env, int flush_global);
68extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
69extern void sync_ldtr(CPUX86State *env1, int selector);
70
71#ifdef VBOX_STRICT
72unsigned long get_phys_page_offset(target_ulong addr);
73#endif
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Copies via a single X86FPUMMX struct assignment, letting the compiler
 * emit an inline move instead of a library call.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM);
92
93static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
101static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
103static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
104static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106
107
108/*******************************************************************************
109* Global Variables *
110*******************************************************************************/
111
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling samples and event counters registered with STAM in REMR3Init.
 * The [6] arrays are indexed by segment register in the order
 * ES, CS, SS, DS, FS, GS (see the STAM_REG calls in REMR3Init). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
static STAMPROFILE    gStatGCPhys2HCVirt;
static STAMPROFILE    gStatHCVirt2GCPhys;
static STAMCOUNTER    gStatCpuGetTSC;
/* Reasons why raw-mode execution was refused (see /REM/Refuse/* below). */
static STAMCOUNTER    gStatRefuseTFInhibit;
static STAMCOUNTER    gStatRefuseVM86;
static STAMCOUNTER    gStatRefusePaging;
static STAMCOUNTER    gStatRefusePAE;
static STAMCOUNTER    gStatRefuseIOPLNot0;
static STAMCOUNTER    gStatRefuseIF0;
static STAMCOUNTER    gStatRefuseCode16;
static STAMCOUNTER    gStatRefuseWP0;
static STAMCOUNTER    gStatRefuseRing1or2;
static STAMCOUNTER    gStatRefuseCanExecute;
/* Descriptor table / task register change notifications. */
static STAMCOUNTER    gStatREMGDTChange;
static STAMCOUNTER    gStatREMIDTChange;
static STAMCOUNTER    gStatREMLDTRChange;
static STAMCOUNTER    gStatREMTRChange;
static STAMCOUNTER    gStatSelOutOfSync[6];
static STAMCOUNTER    gStatSelOutOfSyncStateBack[6];
static STAMCOUNTER    gStatFlushTBs;
#endif
145
/*
 * Global stuff.
 * The four tables below are handed to cpu_register_io_memory() in REMR3Init;
 * index 0/1/2 corresponds to byte/word/dword access width respectively.
 */

/** MMIO read callbacks (indexed by access size: U8, U16, U32). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks (indexed by access size: U8, U16, U32). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks (indexed by access size: U8, U16, U32). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks (indexed by access size: U8, U16, U32). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
181
182
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 * Registered once with DBGCRegisterCommands() from REMR3Init.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
    { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd = "remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
214
215
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
void remAbort(int rc, const char *pszTip);
extern int testmath(void);

/* Put them here to avoid unused variable warning. */
AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
/* Why did this have to be identical?? */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#else
/* NOTE(review): both branches of this #if assert the same thing; the split
 * looks like a leftover from a stricter (equality) check — consider merging. */
AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
#endif


/* Prologue code, must be in lower 4G to simplify jumps to/from generated code.
 * Allocated with RTMemExecAlloc(_1K) in REMR3Init. */
uint8_t* code_gen_prologue;
235
236/**
237 * Initializes the REM.
238 *
239 * @returns VBox status code.
240 * @param pVM The VM to operate on.
241 */
242REMR3DECL(int) REMR3Init(PVM pVM)
243{
244 uint32_t u32Dummy;
245 int rc;
246
247#ifdef VBOX_ENABLE_VBOXREM64
248 LogRel(("Using 64-bit aware REM\n"));
249#endif
250
251 /*
252 * Assert sanity.
253 */
254 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
255 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
256 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
257#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
258 Assert(!testmath());
259#endif
260 /*
261 * Init some internal data members.
262 */
263 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
264 pVM->rem.s.Env.pVM = pVM;
265#ifdef CPU_RAW_MODE_INIT
266 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
267#endif
268
269 /* ctx. */
270 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
271 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
272
273 /* ignore all notifications */
274 pVM->rem.s.fIgnoreAll = true;
275
276 code_gen_prologue = RTMemExecAlloc(_1K);
277
278 cpu_exec_init_all(0);
279
280 /*
281 * Init the recompiler.
282 */
283 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
284 {
285 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
286 return VERR_GENERAL_FAILURE;
287 }
288 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
289 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
290
291 /* allocate code buffer for single instruction emulation. */
292 pVM->rem.s.Env.cbCodeBuffer = 4096;
293 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
294 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
295
296 /* finally, set the cpu_single_env global. */
297 cpu_single_env = &pVM->rem.s.Env;
298
299 /* Nothing is pending by default */
300 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
301
302 /*
303 * Register ram types.
304 */
305 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
306 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
307 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
308 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
309 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
310
311 /* stop ignoring. */
312 pVM->rem.s.fIgnoreAll = false;
313
314 /*
315 * Register the saved state data unit.
316 */
317 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
318 NULL, remR3Save, NULL,
319 NULL, remR3Load, NULL);
320 if (RT_FAILURE(rc))
321 return rc;
322
323#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
324 /*
325 * Debugger commands.
326 */
327 static bool fRegisteredCmds = false;
328 if (!fRegisteredCmds)
329 {
330 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
331 if (RT_SUCCESS(rc))
332 fRegisteredCmds = true;
333 }
334#endif
335
336#ifdef VBOX_WITH_STATISTICS
337 /*
338 * Statistics.
339 */
340 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
341 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
342 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
343 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
344 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
345 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
346 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
347 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
348 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
349 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
350 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
351 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
352
353 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
354
355 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
356 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
357 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
358 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
359 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
360 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
361 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
362 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
363 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
364 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
365 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
366
367 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
368 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
369 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
370 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
371
372 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
373 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
374 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
375 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
376 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
377 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
378
379 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
380 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
385
386
387#endif
388
389#ifdef DEBUG_ALL_LOGGING
390 loglevel = ~0;
391# ifdef DEBUG_TMP_LOGGING
392 logfile = fopen("/tmp/vbox-qemu.log", "w");
393# endif
394#endif
395
396 return rc;
397}
398
399
400/**
401 * Terminates the REM.
402 *
403 * Termination means cleaning up and freeing all resources,
404 * the VM it self is at this point powered off or suspended.
405 *
406 * @returns VBox status code.
407 * @param pVM The VM to operate on.
408 */
409REMR3DECL(int) REMR3Term(PVM pVM)
410{
411 return VINF_SUCCESS;
412}
413
414
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * fIgnoreAll is raised around cpu_reset() so the notifications it
     * triggers are suppressed (same pattern as REMR3Init/remR3Load).
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;   /* drop the queued page invalidations */
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
439
440
/**
 * Execute state save operation.
 *
 * Saved layout (must stay in sync with remR3Load):
 *   u32 hflags, u32 ~0 separator, u32 raw-ring-0 flag,
 *   uint pending interrupt, u32 ~0 terminator.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    PREM pRem = &pVM->rem.s;
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    /* NOTE(review): intermediate SSMR3Put* statuses are ignored; presumably
     * SSM latches the first error internally — confirm against SSM docs. */
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);          /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0);   /* terminator */
}
466
467
/**
 * Execute state load operation.
 *
 * Accepts the current REM_SAVED_STATE_VERSION layout (mirror of remR3Save)
 * as well as the 1.6 layout which additionally carries a redundant CPU
 * state blob and the invalidated-page list.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff.
         * The page count is bounds-checked against the fixed-size array
         * before the entries are read in.
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Sync the Load Flush the TLB
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
589
590
591
592#undef LOG_GROUP
593#define LOG_GROUP LOG_GROUP_REM_RUN
594
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * The original interrupt_request is stashed and restored at the end.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * cpu_breakpoint_remove returns 0 on success, so fBp records whether a
     * breakpoint was actually present (and must be re-inserted afterwards).
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Resume/pause the tick sources so time advances for the one instruction. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM rc was raised; fetch it and reset the stash slot. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
675
676
677/**
678 * Set a breakpoint using the REM facilities.
679 *
680 * @returns VBox status code.
681 * @param pVM The VM handle.
682 * @param Address The breakpoint address.
683 * @thread The emulation thread.
684 */
685REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
686{
687 VM_ASSERT_EMT(pVM);
688 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
689 {
690 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
691 return VINF_SUCCESS;
692 }
693 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
694 return VERR_REM_NO_MORE_BP_SLOTS;
695}
696
697
698/**
699 * Clears a breakpoint set by REMR3BreakpointSet().
700 *
701 * @returns VBox status code.
702 * @param pVM The VM handle.
703 * @param Address The breakpoint address.
704 * @thread The emulation thread.
705 */
706REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
707{
708 VM_ASSERT_EMT(pVM);
709 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
710 {
711 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
712 return VINF_SUCCESS;
713 }
714 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
715 return VERR_REM_BP_NOT_FOUND;
716}
717
718
/**
 * Emulate an instruction.
 *
 * This function executes one instruction without letting anyone
 * interrupt it. This is intended for being called while being in
 * raw mode and thus will take care of all the state syncing between
 * REM and the rest.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 */
REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
{
    bool fFlushTBs;

    int rc, rc2;
    Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));

    /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
     * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
     */
    if (HWACCMIsEnabled(pVM))
        pVM->rem.s.Env.state |= CPU_RAW_HWACC;

    /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
    fFlushTBs = pVM->rem.s.fFlushTBs;
    pVM->rem.s.fFlushTBs = false;

    /*
     * Sync the state and enable single instruction / single stepping.
     * The saved fFlushTBs is restored immediately after the sync.
     */
    rc = REMR3State(pVM);
    pVM->rem.s.fFlushTBs = fFlushTBs;
    if (RT_SUCCESS(rc))
    {
        /* Stash interrupt_request; restored after the switch below. */
        int interrupt_request = pVM->rem.s.Env.interrupt_request;
        Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
        Assert(!pVM->rem.s.Env.singlestep_enabled);
        /*
         * Now we set the execute single instruction flag and enter the cpu_exec loop.
         */
        TMNotifyStartOfExecution(pVM);
        pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
        rc = cpu_exec(&pVM->rem.s.Env);
        TMNotifyEndOfExecution(pVM);
        switch (rc)
        {
            /*
             * Executed without anything out of the way happening.
             */
            case EXCP_SINGLE_INSTR:
                rc = VINF_EM_RESCHEDULE;
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
                break;

            /*
             * If we take a trap or start servicing a pending interrupt, we might end up here.
             * (Timer thread or some other thread wishing EMT's attention.)
             */
            case EXCP_INTERRUPT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
                rc = VINF_EM_RESCHEDULE;
                break;

            /*
             * Single step, we assume!
             * If there was a breakpoint there we're fucked now.
             */
            case EXCP_DEBUG:
            {
                /* breakpoint or single step? Scan the breakpoint table for the PC. */
                RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
                int iBP;
                rc = VINF_EM_DBG_STEPPED;
                for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                    if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                    {
                        rc = VINF_EM_DBG_BREAKPOINT;
                        break;
                    }
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
                break;
            }

            /*
             * hlt instruction.
             */
            case EXCP_HLT:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * The VM has halted.
             */
            case EXCP_HALTED:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
                rc = VINF_EM_HALT;
                break;

            /*
             * Switch to RAW-mode.
             */
            case EXCP_EXECUTE_RAW:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
                rc = VINF_EM_RESCHEDULE_RAW;
                break;

            /*
             * Switch to hardware accelerated RAW-mode.
             */
            case EXCP_EXECUTE_HWACC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
                rc = VINF_EM_RESCHEDULE_HWACC;
                break;

            /*
             * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
             */
            case EXCP_RC:
                Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;

            /*
             * Figure out the rest when they arrive....
             */
            default:
                AssertMsgFailed(("rc=%d\n", rc));
                Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
                rc = VINF_EM_RESCHEDULE;
                break;
        }

        /*
         * Switch back the state.
         */
        pVM->rem.s.Env.interrupt_request = interrupt_request;
        rc2 = REMR3StateBack(pVM);
        AssertRC(rc2);
    }

    Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
          rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
866
867
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    int rc;
    Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);

    /* Bracket the recompiled execution with TM notifications so time
       accounting covers exactly the time spent inside cpu_exec(). */
    TMNotifyStartOfExecution(pVM);
    rc = cpu_exec(&pVM->rem.s.Env);
    TMNotifyEndOfExecution(pVM);

    /* Translate the QEMU exit reason (EXCP_*) into a VBox status code. */
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* breakpoint or single step? Default to 'stepped' and upgrade to
               'breakpoint' when the current flat PC matches one of the
               registered REM breakpoints. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR; /* consume the pending status */
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
    return rc;
}
994
995
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean
 * @param   env         The CPU env struct.
 * @param   eip         The EIP to check this for (might differ from env->eip).
 * @param   fFlags      hflags OR'ed with IOPL, TF and VM from eflags.
 * @param   piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark  This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    uint32_t u32CR0;

    /* Update counter. */
    env->pVM->rem.s.cCanExecuteRaw++;

    /* With HWACCM enabled the decision is delegated to HWACCMR3CanExecuteGuest. */
    if (HWACCMIsEnabled(env->pVM))
    {
        CPUMCTX Ctx;

        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Note: the '(flags >> 8) & 0xF0FF' dance converts QEMU's descriptor
         * flags (attribute word shifted left by 8) back to the x86 attribute
         * format CPUMCTX expects.
         */
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u64Base = env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = env->idt.base;

        Ctx.gdtr.cbGdt = env->gdt.limit;
        Ctx.gdtr.pGdt = env->gdt.base;

        Ctx.rsp = env->regs[R_ESP];
#ifdef LOG_ENABLED
        Ctx.rip = env->eip;
#endif

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u64Base = env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ds = env->segs[R_DS].selector;
        Ctx.dsHid.u64Base = env->segs[R_DS].base;
        Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
        Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;

        Ctx.es = env->segs[R_ES].selector;
        Ctx.esHid.u64Base = env->segs[R_ES].base;
        Ctx.esHid.u32Limit = env->segs[R_ES].limit;
        Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;

        Ctx.fs = env->segs[R_FS].selector;
        Ctx.fsHid.u64Base = env->segs[R_FS].base;
        Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
        Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;

        Ctx.gs = env->segs[R_GS].selector;
        Ctx.gsHid.u64Base = env->segs[R_GS].base;
        Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
        Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u64Base = env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        Ctx.msrEFER = env->efer;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *piException = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelyhood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        /* Trap flag set or interrupts inhibited (after STI/MOV SS): stay in REM. */
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging (PG and PE both set). */
    u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        /* PAE guest only allowed when the virtual CPU advertises PAE. */
        if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
        {
            STAM_COUNTER_INC(&gStatRefusePAE);
            return false;
        }
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring-3 code path. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring-0 code path (ring 1/2 refused below). */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always executed raw, overriding the IF check below. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *piException = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *piException = EXCP_EXECUTE_RAW;
    return true;
}
1232
1233
1234/**
1235 * Fetches a code byte.
1236 *
1237 * @returns Success indicator (bool) for ease of use.
1238 * @param env The CPU environment structure.
1239 * @param GCPtrInstr Where to fetch code.
1240 * @param pu8Byte Where to store the byte on success
1241 */
1242bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1243{
1244 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1245 if (RT_SUCCESS(rc))
1246 return true;
1247 return false;
1248}
1249
1250
/**
 * Flush (or invalidate if you like) page table/dir entry.
 *
 * (invlpg instruction; tlb_flush_page)
 *
 * @param   env     Pointer to cpu environment.
 * @param   GCPtr   The virtual address which page table/dir entry should be invalidated.
 */
void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;
    int rc;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
        return;
    Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
    Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);

    //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);

    /*
     * Update the control registers before calling PGMFlushPage.
     * PGM may look at CR0/CR3/CR4 in the shared context, so they must
     * reflect the recompiler's view first.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    rc = PGMInvalidatePage(pVM, GCPtr);
    if (RT_FAILURE(rc))
    {
        /* Couldn't invalidate the single page - request a full CR3 resync instead. */
        AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    }
    //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
}
1295
1296
1297#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Translates a guest physical address into a host R3 pointer for the REM TLB.
 *
 * The two low bits of the returned pointer are used as status flags by the
 * caller, which is why the address must be at least 4-byte aligned.
 *
 * @returns Host pointer on success; the pointer with bit 1 set when writes
 *          must be caught (VINF_PGM_PHYS_TLB_CATCH_WRITE); (void *)1 when all
 *          accesses must go through handlers (failure statuses).
 * @param   env1        The CPU environment.
 * @param   physAddr    Guest physical address to translate.
 * @param   fWritable   Whether write access is intended.
 *                      NOTE(review): this parameter is currently ignored - the
 *                      call below hardcodes true; confirm that is intentional.
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert(   rc == VINF_SUCCESS
           || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
           || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
           || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;                   /* flag: access via handlers only */
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2); /* flag: intercept writes */
    return pv;
}
1317
1318target_ulong remR3HCVirt2GCPhys(CPUState *env1, void *addr)
1319{
1320 RTGCPHYS rv = 0;
1321 int rc;
1322
1323 rc = PGMR3DbgR3Ptr2GCPhys(env1->pVM, (RTR3PTR)addr, &rv);
1324 Assert (RT_SUCCESS(rc));
1325
1326 return (target_ulong)rv;
1327}
1328#endif
1329
1330/**
1331 * Called from tlb_protect_code in order to write monitor a code page.
1332 *
1333 * @param env Pointer to the CPU environment.
1334 * @param GCPtr Code page to monitor
1335 */
1336void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1337{
1338#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1339 Assert(env->pVM->rem.s.fInREM);
1340 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1341 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1342 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1343 && !(env->eflags & VM_MASK) /* no V86 mode */
1344 && !HWACCMIsEnabled(env->pVM))
1345 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1346#endif
1347}
1348
1349/**
1350 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1351 *
1352 * @param env Pointer to the CPU environment.
1353 * @param GCPtr Code page to monitor
1354 */
1355void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1356{
1357 Assert(env->pVM->rem.s.fInREM);
1358#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1359 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1360 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1361 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1362 && !(env->eflags & VM_MASK) /* no V86 mode */
1363 && !HWACCMIsEnabled(env->pVM))
1364 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1365#endif
1366}
1367
/**
 * Called when the CPU is initialized, any of the CRx registers are changed or
 * when the A20 line is modified.
 *
 * @param   env     Pointer to the CPU environment.
 * @param   fGlobal Set if the flush is global.
 */
void remR3FlushTLB(CPUState *env, bool fGlobal)
{
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying invlpg instructions or restoring a saved
     * state we disable this path.
     */
    if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * The caller doesn't check cr4, so we have to do that for ourselves.
     * Without CR4.PGE there are no global pages, so a non-global flush
     * is equivalent to a global one.
     */
    if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
        fGlobal = true;
    Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));

    /*
     * Update the control registers before calling PGMR3FlushTLB.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

    /*
     * Let PGM do the rest.
     */
    PGMFlushTLB(pVM, env->cr[3], fGlobal);
}
1408
1409
/**
 * Called when any of the cr0, cr4 or efer registers is updated.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3ChangeCpuMode(CPUState *env)
{
    int rc;
    PVM pVM = env->pVM;
    PCPUMCTX pCtx;

    /*
     * When we're replaying loads or restoring a saved
     * state this path is disabled.
     */
    if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
        return;
    Assert(pVM->rem.s.fInREM);

    /*
     * Update the control registers before calling PGMChangeMode()
     * as it may need to map whatever cr3 is pointing to.
     */
    pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
    pCtx->cr0 = env->cr[0];
    pCtx->cr3 = env->cr[3];
    pCtx->cr4 = env->cr[4];

#ifdef TARGET_X86_64
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
#else
    /* 32-bit target: no EFER, pass 0. */
    rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
    if (rc != VINF_SUCCESS)
        cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
#endif
}
1448
1449
/**
 * Called from compiled code to run dma.
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3DmaRun(CPUState *env)
{
    /* Suspend the emulated-code profiling bracket while PDM services DMA. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    PDMR3DmaRun(env->pVM);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1461
1462
/**
 * Called from compiled code to schedule pending timers in VMM
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3TimersRun(CPUState *env)
{
    LogFlow(("remR3TimersRun:\n"));
    /* Swap the profiling bracket from emulated-code to timer work and back. */
    remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
    remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
    TMR3TimerQueuesDo(env->pVM);
    remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
    remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
}
1477
1478
/**
 * Record trap occurance
 *
 * @returns VBox status code
 * @param   env         Pointer to the CPU environment.
 * @param   uTrap       Trap nr
 * @param   uErrorCode  Error code
 * @param   pvNextEIP   Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
{
    PVM pVM = env->pVM;
#ifdef VBOX_WITH_STATISTICS
    static STAMCOUNTER s_aStatTrap[255];
    static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        /* Lazily register a STAM counter the first time each trap number is seen. */
        if (!s_aRegisters[uTrap])
        {
            char szStatName[64];
            s_aRegisters[uTrap] = true;
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
    /* CPU exceptions (0..0x1f) in protected, non-V86 mode get trap-loop detection. */
    if(    uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* The counter is reset below whenever the trap, EIP or CR2 changes, so
           exceeding 512 means the very same exception kept recurring - treat
           that as a wedged guest and raise VERR_REM_TOO_MANY_TRAPS. */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    else
    {
        /* Software interrupts / V86 / real mode: just record, no loop detection. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    return VINF_SUCCESS;
}
1538
1539
1540/*
1541 * Clear current active trap
1542 *
1543 * @param pVM VM Handle.
1544 */
1545void remR3TrapClear(PVM pVM)
1546{
1547 pVM->rem.s.cPendingExceptions = 0;
1548 pVM->rem.s.uPendingException = 0;
1549 pVM->rem.s.uPendingExcptEIP = 0;
1550 pVM->rem.s.uPendingExcptCR2 = 0;
1551}
1552
1553
/*
 * Record previous call instruction addresses
 *
 * @param   env     Pointer to the CPU environment.
 */
void remR3RecordCall(CPUState *env)
{
    /* Forward the current EIP to CSAM's call-address recording. */
    CSAMR3RecordCallAddress(env->pVM, env->eip);
}
1563
1564
1565/**
1566 * Syncs the internal REM state with the VM.
1567 *
1568 * This must be called before REMR3Run() is invoked whenever when the REM
1569 * state is not up to date. Calling it several times in a row is not
1570 * permitted.
1571 *
1572 * @returns VBox status code.
1573 *
1574 * @param pVM VM Handle.
1575 * @param fFlushTBs Flush all translation blocks before executing code
1576 *
1577 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1578 * no do this since the majority of the callers don't want any unnecessary of events
1579 * pending that would immediatly interrupt execution.
1580 */
1581REMR3DECL(int) REMR3State(PVM pVM)
1582{
1583 register const CPUMCTX *pCtx;
1584 register unsigned fFlags;
1585 bool fHiddenSelRegsValid;
1586 unsigned i;
1587 TRPMEVENT enmType;
1588 uint8_t u8TrapNo;
1589 int rc;
1590
1591 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1592 Log2(("REMR3State:\n"));
1593
1594 pCtx = pVM->rem.s.pCtx;
1595 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1596
1597 Assert(!pVM->rem.s.fInREM);
1598 pVM->rem.s.fInStateSync = true;
1599
1600 /*
1601 * If we have to flush TBs, do that immediately.
1602 */
1603 if (pVM->rem.s.fFlushTBs)
1604 {
1605 STAM_COUNTER_INC(&gStatFlushTBs);
1606 tb_flush(&pVM->rem.s.Env);
1607 pVM->rem.s.fFlushTBs = false;
1608 }
1609
1610 /*
1611 * Copy the registers which require no special handling.
1612 */
1613#ifdef TARGET_X86_64
1614 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1615 Assert(R_EAX == 0);
1616 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1617 Assert(R_ECX == 1);
1618 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1619 Assert(R_EDX == 2);
1620 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1621 Assert(R_EBX == 3);
1622 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1623 Assert(R_ESP == 4);
1624 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1625 Assert(R_EBP == 5);
1626 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1627 Assert(R_ESI == 6);
1628 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1629 Assert(R_EDI == 7);
1630 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1631 pVM->rem.s.Env.regs[8] = pCtx->r8;
1632 pVM->rem.s.Env.regs[9] = pCtx->r9;
1633 pVM->rem.s.Env.regs[10] = pCtx->r10;
1634 pVM->rem.s.Env.regs[11] = pCtx->r11;
1635 pVM->rem.s.Env.regs[12] = pCtx->r12;
1636 pVM->rem.s.Env.regs[13] = pCtx->r13;
1637 pVM->rem.s.Env.regs[14] = pCtx->r14;
1638 pVM->rem.s.Env.regs[15] = pCtx->r15;
1639
1640 pVM->rem.s.Env.eip = pCtx->rip;
1641
1642 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1643#else
1644 Assert(R_EAX == 0);
1645 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1646 Assert(R_ECX == 1);
1647 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1648 Assert(R_EDX == 2);
1649 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1650 Assert(R_EBX == 3);
1651 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1652 Assert(R_ESP == 4);
1653 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1654 Assert(R_EBP == 5);
1655 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1656 Assert(R_ESI == 6);
1657 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1658 Assert(R_EDI == 7);
1659 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1660 pVM->rem.s.Env.eip = pCtx->eip;
1661
1662 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1663#endif
1664
1665 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1666
1667 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1668 for (i=0;i<8;i++)
1669 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1670
1671 /*
1672 * Clear the halted hidden flag (the interrupt waking up the CPU can
1673 * have been dispatched in raw mode).
1674 */
1675 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1676
1677 /*
1678 * Replay invlpg?
1679 */
1680 if (pVM->rem.s.cInvalidatedPages)
1681 {
1682 RTUINT i;
1683
1684 pVM->rem.s.fIgnoreInvlPg = true;
1685 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1686 {
1687 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1688 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1689 }
1690 pVM->rem.s.fIgnoreInvlPg = false;
1691 pVM->rem.s.cInvalidatedPages = 0;
1692 }
1693
1694 /* Replay notification changes? */
1695 if (pVM->rem.s.cHandlerNotifications)
1696 REMR3ReplayHandlerNotifications(pVM);
1697
1698 /* Update MSRs; before CRx registers! */
1699 pVM->rem.s.Env.efer = pCtx->msrEFER;
1700 pVM->rem.s.Env.star = pCtx->msrSTAR;
1701 pVM->rem.s.Env.pat = pCtx->msrPAT;
1702#ifdef TARGET_X86_64
1703 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1704 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1705 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1706 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1707
1708 /* Update the internal long mode activate flag according to the new EFER value. */
1709 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1710 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1711 else
1712 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1713#endif
1714
1715
1716 /*
1717 * Registers which are rarely changed and require special handling / order when changed.
1718 */
1719 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1720 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1721 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1722 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1723 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1724 {
1725 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1726 {
1727 pVM->rem.s.fIgnoreCR3Load = true;
1728 tlb_flush(&pVM->rem.s.Env, true);
1729 pVM->rem.s.fIgnoreCR3Load = false;
1730 }
1731
1732 /* CR4 before CR0! */
1733 if (fFlags & CPUM_CHANGED_CR4)
1734 {
1735 pVM->rem.s.fIgnoreCR3Load = true;
1736 pVM->rem.s.fIgnoreCpuMode = true;
1737 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1738 pVM->rem.s.fIgnoreCpuMode = false;
1739 pVM->rem.s.fIgnoreCR3Load = false;
1740 }
1741
1742 if (fFlags & CPUM_CHANGED_CR0)
1743 {
1744 pVM->rem.s.fIgnoreCR3Load = true;
1745 pVM->rem.s.fIgnoreCpuMode = true;
1746 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1747 pVM->rem.s.fIgnoreCpuMode = false;
1748 pVM->rem.s.fIgnoreCR3Load = false;
1749 }
1750
1751 if (fFlags & CPUM_CHANGED_CR3)
1752 {
1753 pVM->rem.s.fIgnoreCR3Load = true;
1754 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1755 pVM->rem.s.fIgnoreCR3Load = false;
1756 }
1757
1758 if (fFlags & CPUM_CHANGED_GDTR)
1759 {
1760 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1761 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1762 }
1763
1764 if (fFlags & CPUM_CHANGED_IDTR)
1765 {
1766 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1767 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1768 }
1769
1770 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1771 {
1772 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1773 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1774 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1775 }
1776
1777 if (fFlags & CPUM_CHANGED_LDTR)
1778 {
1779 if (fHiddenSelRegsValid)
1780 {
1781 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1782 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1783 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1784 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1785 }
1786 else
1787 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1788 }
1789
1790 if (fFlags & CPUM_CHANGED_CPUID)
1791 {
1792 uint32_t u32Dummy;
1793
1794 /*
1795 * Get the CPUID features.
1796 */
1797 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1798 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1799 }
1800
1801 /* Sync FPU state after CR4, CPUID and EFER (!). */
1802 if (fFlags & CPUM_CHANGED_FPU_REM)
1803 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1804 }
1805
1806 /*
1807 * Sync TR unconditionally to make life simpler.
1808 */
1809 pVM->rem.s.Env.tr.selector = pCtx->tr;
1810 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1811 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1812 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1813 /* Note! do_interrupt will fault if the busy flag is still set... */
1814 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1815
1816 /*
1817 * Update selector registers.
1818 * This must be done *after* we've synced gdt, ldt and crX registers
1819 * since we're reading the GDT/LDT om sync_seg. This will happen with
1820 * saved state which takes a quick dip into rawmode for instance.
1821 */
1822 /*
1823 * Stack; Note first check this one as the CPL might have changed. The
1824 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1825 */
1826
1827 if (fHiddenSelRegsValid)
1828 {
1829 /* The hidden selector registers are valid in the CPU context. */
1830 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1831
1832 /* Set current CPL */
1833 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1834
1835 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1836 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1837 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1838 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1839 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1840 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1841 }
1842 else
1843 {
1844 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1845 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1846 {
1847 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1848
1849 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1850 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1851#ifdef VBOX_WITH_STATISTICS
1852 if (pVM->rem.s.Env.segs[R_SS].newselector)
1853 {
1854 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1855 }
1856#endif
1857 }
1858 else
1859 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1860
1861 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1862 {
1863 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1864 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1865#ifdef VBOX_WITH_STATISTICS
1866 if (pVM->rem.s.Env.segs[R_ES].newselector)
1867 {
1868 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1869 }
1870#endif
1871 }
1872 else
1873 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1874
1875 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1876 {
1877 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1878 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1879#ifdef VBOX_WITH_STATISTICS
1880 if (pVM->rem.s.Env.segs[R_CS].newselector)
1881 {
1882 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1883 }
1884#endif
1885 }
1886 else
1887 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1888
1889 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1890 {
1891 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1892 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1893#ifdef VBOX_WITH_STATISTICS
1894 if (pVM->rem.s.Env.segs[R_DS].newselector)
1895 {
1896 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1897 }
1898#endif
1899 }
1900 else
1901 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1902
1903 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
1904 * be the same but not the base/limit. */
1905 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
1906 {
1907 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
1908 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
1909#ifdef VBOX_WITH_STATISTICS
1910 if (pVM->rem.s.Env.segs[R_FS].newselector)
1911 {
1912 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
1913 }
1914#endif
1915 }
1916 else
1917 pVM->rem.s.Env.segs[R_FS].newselector = 0;
1918
1919 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
1920 {
1921 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
1922 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
1923#ifdef VBOX_WITH_STATISTICS
1924 if (pVM->rem.s.Env.segs[R_GS].newselector)
1925 {
1926 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
1927 }
1928#endif
1929 }
1930 else
1931 pVM->rem.s.Env.segs[R_GS].newselector = 0;
1932 }
1933
1934 /*
1935 * Check for traps.
1936 */
1937 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
1938 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
1939 if (RT_SUCCESS(rc))
1940 {
1941#ifdef DEBUG
1942 if (u8TrapNo == 0x80)
1943 {
1944 remR3DumpLnxSyscall(pVM);
1945 remR3DumpOBsdSyscall(pVM);
1946 }
1947#endif
1948
1949 pVM->rem.s.Env.exception_index = u8TrapNo;
1950 if (enmType != TRPM_SOFTWARE_INT)
1951 {
1952 pVM->rem.s.Env.exception_is_int = 0;
1953 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
1954 }
1955 else
1956 {
1957 /*
1958 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
1959 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
1960 * for int03 and into.
1961 */
1962 pVM->rem.s.Env.exception_is_int = 1;
1963 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
1964 /* int 3 may be generated by one-byte 0xcc */
1965 if (u8TrapNo == 3)
1966 {
1967 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
1968 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1969 }
1970 /* int 4 may be generated by one-byte 0xce */
1971 else if (u8TrapNo == 4)
1972 {
1973 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
1974 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
1975 }
1976 }
1977
1978 /* get error code and cr2 if needed. */
1979 switch (u8TrapNo)
1980 {
1981 case 0x0e:
1982 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
1983 /* fallthru */
1984 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
1985 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
1986 break;
1987
1988 case 0x11: case 0x08:
1989 default:
1990 pVM->rem.s.Env.error_code = 0;
1991 break;
1992 }
1993
1994 /*
1995 * We can now reset the active trap since the recompiler is gonna have a go at it.
1996 */
1997 rc = TRPMResetTrap(pVM);
1998 AssertRC(rc);
1999 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2000 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2001 }
2002
2003 /*
2004 * Clear old interrupt request flags; Check for pending hardware interrupts.
2005 * (See @remark for why we don't check for other FFs.)
2006 */
2007 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2008 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2009 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2010 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2011
2012 /*
2013 * We're now in REM mode.
2014 */
2015 pVM->rem.s.fInREM = true;
2016 pVM->rem.s.fInStateSync = false;
2017 pVM->rem.s.cCanExecuteRaw = 0;
2018 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2019 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2020 return VINF_SUCCESS;
2021}
2022
2023
/**
 * Syncs back changes in the REM state to the VM state.
 *
 * This must be called after invoking REMR3Run().
 * Calling it several times in a row is not permitted.
 *
 * @returns VBox status code.
 *
 * @param   pVM         VM Handle.
 */
REMR3DECL(int) REMR3StateBack(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* A non-zero newselector means the recompiler still has the selector in
       lazily-loaded ("stale") state; count those for diagnostics. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR: if the base moved, request a shadow GDT resync from SELM. */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    /* IDTR: likewise, request a shadow IDT resync from TRPM on base change. */
    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    /* LDTR: compare selector + hidden parts; the attribute word is extracted
       from QEMU's descriptor flags (2nd dword layout, hence the >> 8 & 0xF0FF). */
    if (    pCtx->ldtr             != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base  != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u   != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr              = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base   = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit  = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u    = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr               != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base    != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit   != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u     != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                       ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                       : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr                = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base     = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit    = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u      = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* Force the busy bit back on for non-null attributes since the CPU always
           keeps the loaded TR marked busy, while QEMU clears it internally. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * Re-assert any pending exception with TRPM so it is delivered outside REM.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVM, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    pVM->rem.s.fInREM    = false;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2267
2268
/**
 * This is called by the disassembler when it wants to update the cpu state
 * before for instance doing a register dump.
 *
 * Unlike REMR3StateBack() this is a partial sync: it leaves fInREM set, does
 * not re-assert pending traps with TRPM, and unconditionally copies the hidden
 * LDTR/TR parts instead of comparing them first.
 */
static void remR3StateUpdate(PVM pVM)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    unsigned i;

    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */
    /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
    pCtx->fpu.MXCSR        = 0;
    pCtx->fpu.MXCSR_MASK   = 0;

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];
#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i=0;i<8;i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    /* GDTR/IDTR: raise the resync FFs when the base changed, same as REMR3StateBack(). */
    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt     = (RTGCPTR)pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt     = (RTGCPTR)pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
    }

    if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
    {
        pCtx->ldtr      = pVM->rem.s.Env.ldt.selector;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
    }
    if (pCtx->tr != pVM->rem.s.Env.tr.selector)
    {
        pCtx->tr        = pVM->rem.s.Env.tr.selector;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    /* NOTE(review): attribute masks below use 0xFFFF while REMR3StateBack()
       uses 0xF0FF (dropping the limit bits 8-11) — confirm which is intended. */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;

    pCtx->ldtrHid.u64Base  = pVM->rem.s.Env.ldt.base;
    pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
    pCtx->ldtrHid.Attr.u   = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;

    pCtx->trHid.u64Base    = pVM->rem.s.Env.tr.base;
    pCtx->trHid.u32Limit   = pVM->rem.s.Env.tr.limit;
    pCtx->trHid.Attr.u     = (pVM->rem.s.Env.tr.flags >> 8) & 0xFFFF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

}
2432
2433
2434/**
2435 * Update the VMM state information if we're currently in REM.
2436 *
2437 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2438 * we're currently executing in REM and the VMM state is invalid. This method will of
2439 * course check that we're executing in REM before syncing any data over to the VMM.
2440 *
2441 * @param pVM The VM handle.
2442 */
2443REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2444{
2445 if (pVM->rem.s.fInREM)
2446 remR3StateUpdate(pVM);
2447}
2448
2449
2450#undef LOG_GROUP
2451#define LOG_GROUP LOG_GROUP_REM
2452
2453
2454/**
2455 * Notify the recompiler about Address Gate 20 state change.
2456 *
2457 * This notification is required since A20 gate changes are
2458 * initialized from a device driver and the VM might just as
2459 * well be in REM mode as in RAW mode.
2460 *
2461 * @param pVM VM handle.
2462 * @param fEnable True if the gate should be enabled.
2463 * False if the gate should be disabled.
2464 */
2465REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2466{
2467 bool fSaved;
2468
2469 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2470 VM_ASSERT_EMT(pVM);
2471
2472 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2473 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2474
2475 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2476
2477 pVM->rem.s.fIgnoreAll = fSaved;
2478}
2479
2480
2481/**
2482 * Replays the invalidated recorded pages.
2483 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2484 *
2485 * @param pVM VM handle.
2486 */
2487REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2488{
2489 RTUINT i;
2490
2491 VM_ASSERT_EMT(pVM);
2492
2493 /*
2494 * Sync the required registers.
2495 */
2496 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2497 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2498 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2499 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2500
2501 /*
2502 * Replay the flushes.
2503 */
2504 pVM->rem.s.fIgnoreInvlPg = true;
2505 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2506 {
2507 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2508 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2509 }
2510 pVM->rem.s.fIgnoreInvlPg = false;
2511 pVM->rem.s.cInvalidatedPages = 0;
2512}
2513
2514
2515/**
2516 * Replays the handler notification changes
2517 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2518 *
2519 * @param pVM VM handle.
2520 */
2521REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2522{
2523 /*
2524 * Replay the flushes.
2525 */
2526 RTUINT i;
2527 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2528
2529 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2530 VM_ASSERT_EMT(pVM);
2531
2532 pVM->rem.s.cHandlerNotifications = 0;
2533 for (i = 0; i < c; i++)
2534 {
2535 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2536 switch (pRec->enmKind)
2537 {
2538 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2539 REMR3NotifyHandlerPhysicalRegister(pVM,
2540 pRec->u.PhysicalRegister.enmType,
2541 pRec->u.PhysicalRegister.GCPhys,
2542 pRec->u.PhysicalRegister.cb,
2543 pRec->u.PhysicalRegister.fHasHCHandler);
2544 break;
2545
2546 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2547 REMR3NotifyHandlerPhysicalDeregister(pVM,
2548 pRec->u.PhysicalDeregister.enmType,
2549 pRec->u.PhysicalDeregister.GCPhys,
2550 pRec->u.PhysicalDeregister.cb,
2551 pRec->u.PhysicalDeregister.fHasHCHandler,
2552 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2553 break;
2554
2555 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2556 REMR3NotifyHandlerPhysicalModify(pVM,
2557 pRec->u.PhysicalModify.enmType,
2558 pRec->u.PhysicalModify.GCPhysOld,
2559 pRec->u.PhysicalModify.GCPhysNew,
2560 pRec->u.PhysicalModify.cb,
2561 pRec->u.PhysicalModify.fHasHCHandler,
2562 pRec->u.PhysicalModify.fRestoreAsRAM);
2563 break;
2564
2565 default:
2566 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2567 break;
2568 }
2569 }
2570 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2571}
2572
2573
/**
 * Notify REM about changed code page.
 *
 * @returns VBox status code. Always VINF_SUCCESS; a PGMGstGetPage failure (or
 *          a build without VBOX_REM_PROTECT_PAGES_FROM_SMC) is silently ignored.
 * @param   pVM             VM handle.
 * @param   pvCodePage      Code page address
 */
REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
{
#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
    int      rc;
    RTGCPHYS PhysGC;
    uint64_t flags;

    VM_ASSERT_EMT(pVM);

    /*
     * Get the physical page address.
     */
    rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Sync the required registers and flush the whole page.
         * (Easier to do the whole page than notifying it about each physical
         * byte that was changed.)
         */
        pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
        pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
        pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
        pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;

        tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
    }
#endif
    return VINF_SUCCESS;
}
2611
2612
/**
 * Notification about a successful MMR3PhysRegister() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address the RAM.
 * @param   cb          Size of the memory.
 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
 */
REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, unsigned fFlags)
{
    uint32_t cbBitmap;
    int rc;
    Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%d fFlags=%d\n", GCPhys, cb, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Base ram?  GCPhys == 0 means the main RAM range; that first registration
     * also sets up QEMU's global dirty-page bitmap (phys_ram_dirty).
     */
    if (!GCPhys)
    {
        phys_ram_size = cb;
        phys_ram_dirty_size = cb >> PAGE_SHIFT;
#ifndef VBOX_STRICT
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
        phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
        AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
        /* Protect everything beyond the in-use part of the bitmap so stray
           accesses fault immediately, then point at the aligned tail. */
        cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
        AssertRC(rc);
        phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
#endif
        /* Mark all pages dirty initially. */
        memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    }

    /*
     * Register the ram.  fIgnoreAll suppresses notification feedback while
     * poking QEMU's physical memory map.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

#ifdef VBOX_WITH_NEW_PHYS_CODE
    if (fFlags & MM_RAM_FLAGS_RESERVED)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else
        cpu_register_physical_memory(GCPhys, cb, GCPhys);
#else
    if (!GCPhys)
        cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
    else
    {
        if (fFlags & MM_RAM_FLAGS_RESERVED)
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        else
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
    }
#endif
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2681
2682#ifndef VBOX_WITH_NEW_PHYS_CODE
2683
/**
 * Notification about a successful PGMR3PhysRegisterChunk() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address the RAM.
 * @param   cb          Size of the memory.
 * @param   pvRam       The HC address of the RAM.
 * @param   fFlags      Flags of the MM_RAM_FLAGS_* defines.
 */
REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
{
    Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(pvRam);
    Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
    Assert(fFlags == 0 /* normal RAM */);
    /* Suppress access notifications while updating QEMU's physical memory map. */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;
    cpu_register_physical_memory(GCPhys, cb, GCPhys);
    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2712
2713
/**
 * Grows dynamically allocated guest RAM.
 * Will raise a fatal error (cpu_abort) if the operation fails; this function
 * only returns normally on success.
 *
 * @param   physaddr    The physical address.
 */
void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
{
    int rc;
    PVM pVM = cpu_single_env->pVM;
    const RTGCPHYS GCPhys = physaddr;

    LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
    rc = PGM3PhysGrowRange(pVM, &GCPhys);
    if (RT_SUCCESS(rc))
        return;

    LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
    cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
    AssertFatalFailed();
}
2735
2736#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2737
/**
 * Notification about a successful MMR3PhysRomRegister() call.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      The physical address of the ROM.
 * @param   cb          The size of the ROM.
 * @param   pvCopy      Pointer to the ROM copy.
 * @param   fShadow     Whether it's currently writable shadow ROM or normal readonly ROM.
 *                      This function will be called when ever the protection of the
 *                      shadow ROM changes (at reset and end of POST).
 */
REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
{
    Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
    Assert(pvCopy);
    Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);

    /*
     * Register the rom.  Writable shadow ROM is registered as plain RAM
     * (no IO_MEM_ROM flag) so writes go through.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));

    /* Dump the last 64 bytes of the ROM copy for debugging. */
    Log2(("%.64Rhxd\n", (char *)pvCopy + cb - 64));

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2776
2777
/**
 * Notification about a successful memory deregistration or reservation.
 *
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address.
 * @param   cb          The size of the range.
 * @todo    Rename to REMR3NotifyPhysRamDeregister (for MMIO2) as we won't
 *          reserve any memory soon.
 */
REMR3DECL(void) REMR3NotifyPhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
{
    Log(("REMR3NotifyPhysReserve: GCPhys=%RGp cb=%d\n", GCPhys, cb));
    VM_ASSERT_EMT(pVM);

    /*
     * Validate input - we trust the caller.
     */
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(cb);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    /*
     * Unassigning the memory.
     */
    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2810
2811
/**
 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 *
 * @remark  MMR3PhysRomRegister assumes that this function will not apply the
 *          Handler memory type to memory which has no HC handler.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
{
    Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
          enmType, GCPhys, cb, fHasHCHandler));
    VM_ASSERT_EMT(pVM);
    Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
    Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);

    /* Drain any queued notifications first so they are applied in order. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

    /* MMIO gets the MMIO memory type; other handler kinds only need remapping
       when a HC callback exists (see @remark above). */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
    else if (fHasHCHandler)
        cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2846
2847
/**
 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type.
 * @param   GCPhys          Handler range address.
 * @param   cb              Size of the handler range.
 * @param   fHasHCHandler   Set if the handler has a HC callback function.
 * @param   fRestoreAsRAM   Whether the to restore it as normal RAM or as unassigned memory.
 */
REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
{
    Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
          enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
    VM_ASSERT_EMT(pVM);

    /* Drain any queued notifications first so they are applied in order. */
    if (pVM->rem.s.cHandlerNotifications)
        REMR3ReplayHandlerNotifications(pVM);

    Assert(!pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = true;

/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
    if (enmType == PGMPHYSHANDLERTYPE_MMIO)
        cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
    else if (fHasHCHandler)
    {
        if (!fRestoreAsRAM)
        {
            /* Not restoring as RAM implies the range lies above base RAM. */
            Assert(GCPhys > MMR3PhysGetRamSize(pVM));
            cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
        }
        else
        {
            Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
            Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
            cpu_register_physical_memory(GCPhys, cb, GCPhys);
        }
    }

    Assert(pVM->rem.s.fIgnoreAll);
    pVM->rem.s.fIgnoreAll = false;
}
2891
2892
2893/**
2894 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2895 *
2896 * @param pVM VM Handle.
2897 * @param enmType Handler type.
2898 * @param GCPhysOld Old handler range address.
2899 * @param GCPhysNew New handler range address.
2900 * @param cb Size of the handler range.
2901 * @param fHasHCHandler Set if the handler has a HC callback function.
2902 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2903 */
2904REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2905{
2906 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
2907 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
2908 VM_ASSERT_EMT(pVM);
2909 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
2910
2911 if (pVM->rem.s.cHandlerNotifications)
2912 REMR3ReplayHandlerNotifications(pVM);
2913
2914 if (fHasHCHandler)
2915 {
2916 Assert(!pVM->rem.s.fIgnoreAll);
2917 pVM->rem.s.fIgnoreAll = true;
2918
2919 /*
2920 * Reset the old page.
2921 */
2922 if (!fRestoreAsRAM)
2923 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
2924 else
2925 {
2926 /* This is not perfect, but it'll do for PD monitoring... */
2927 Assert(cb == PAGE_SIZE);
2928 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
2929 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
2930 }
2931
2932 /*
2933 * Update the new page.
2934 */
2935 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
2936 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2937 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
2938
2939 Assert(pVM->rem.s.fIgnoreAll);
2940 pVM->rem.s.fIgnoreAll = false;
2941 }
2942}
2943
2944
2945/**
2946 * Checks if we're handling access to this page or not.
2947 *
2948 * @returns true if we're trapping access.
2949 * @returns false if we aren't.
2950 * @param pVM The VM handle.
2951 * @param GCPhys The physical address.
2952 *
2953 * @remark This function will only work correctly in VBOX_STRICT builds!
2954 */
2955REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
2956{
2957#ifdef VBOX_STRICT
2958 unsigned long off;
2959 if (pVM->rem.s.cHandlerNotifications)
2960 REMR3ReplayHandlerNotifications(pVM);
2961
2962 off = get_phys_page_offset(GCPhys);
2963 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
2964 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
2965 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
2966#else
2967 return false;
2968#endif
2969}
2970
2971
2972/**
2973 * Deals with a rare case in get_phys_addr_code where the code
2974 * is being monitored.
2975 *
2976 * It could also be an MMIO page, in which case we will raise a fatal error.
2977 *
2978 * @returns The physical address corresponding to addr.
2979 * @param env The cpu environment.
2980 * @param addr The virtual address.
2981 * @param pTLBEntry The TLB entry.
2982 */
2983target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
2984 target_ulong addr,
2985 CPUTLBEntry* pTLBEntry,
2986 target_phys_addr_t ioTLBEntry)
2987{
2988 PVM pVM = env->pVM;
2989
2990 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
2991 {
2992 /* If code memory is being monitored, appropriate IOTLB entry will have
2993 handler IO type, and addend will provide real physical address, no
2994 matter if we store VA in TLB or not, as handlers are always passed PA */
2995 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
2996 return ret;
2997 }
2998 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
2999 "*** handlers\n",
3000 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3001 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3002 LogRel(("*** mmio\n"));
3003 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3004 LogRel(("*** phys\n"));
3005 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3006 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3007 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3008 AssertFatalFailed();
3009}
3010
3011/**
3012 * Read guest RAM and ROM.
3013 *
3014 * @param SrcGCPhys The source address (guest physical).
3015 * @param pvDst The destination address.
3016 * @param cb Number of bytes
3017 */
3018void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3019{
3020 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3021 VBOX_CHECK_ADDR(SrcGCPhys);
3022 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3023#ifdef VBOX_DEBUG_PHYS
3024 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3025#endif
3026 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3027}
3028
3029
3030/**
3031 * Read guest RAM and ROM, unsigned 8-bit.
3032 *
3033 * @param SrcGCPhys The source address (guest physical).
3034 */
3035RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3036{
3037 uint8_t val;
3038 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3039 VBOX_CHECK_ADDR(SrcGCPhys);
3040 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3041 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3042#ifdef VBOX_DEBUG_PHYS
3043 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3044#endif
3045 return val;
3046}
3047
3048
3049/**
3050 * Read guest RAM and ROM, signed 8-bit.
3051 *
3052 * @param SrcGCPhys The source address (guest physical).
3053 */
3054RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3055{
3056 int8_t val;
3057 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3058 VBOX_CHECK_ADDR(SrcGCPhys);
3059 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3060 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3061#ifdef VBOX_DEBUG_PHYS
3062 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3063#endif
3064 return val;
3065}
3066
3067
3068/**
3069 * Read guest RAM and ROM, unsigned 16-bit.
3070 *
3071 * @param SrcGCPhys The source address (guest physical).
3072 */
3073RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3074{
3075 uint16_t val;
3076 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3077 VBOX_CHECK_ADDR(SrcGCPhys);
3078 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3079 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3080#ifdef VBOX_DEBUG_PHYS
3081 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3082#endif
3083 return val;
3084}
3085
3086
3087/**
3088 * Read guest RAM and ROM, signed 16-bit.
3089 *
3090 * @param SrcGCPhys The source address (guest physical).
3091 */
3092RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3093{
3094 int16_t val;
3095 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3096 VBOX_CHECK_ADDR(SrcGCPhys);
3097 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3098 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3099#ifdef VBOX_DEBUG_PHYS
3100 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3101#endif
3102 return val;
3103}
3104
3105
3106/**
3107 * Read guest RAM and ROM, unsigned 32-bit.
3108 *
3109 * @param SrcGCPhys The source address (guest physical).
3110 */
3111RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3112{
3113 uint32_t val;
3114 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3115 VBOX_CHECK_ADDR(SrcGCPhys);
3116 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3117 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3118#ifdef VBOX_DEBUG_PHYS
3119 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3120#endif
3121 return val;
3122}
3123
3124
3125/**
3126 * Read guest RAM and ROM, signed 32-bit.
3127 *
3128 * @param SrcGCPhys The source address (guest physical).
3129 */
3130RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3131{
3132 int32_t val;
3133 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3134 VBOX_CHECK_ADDR(SrcGCPhys);
3135 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3136 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3137#ifdef VBOX_DEBUG_PHYS
3138 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3139#endif
3140 return val;
3141}
3142
3143
3144/**
3145 * Read guest RAM and ROM, unsigned 64-bit.
3146 *
3147 * @param SrcGCPhys The source address (guest physical).
3148 */
3149uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3150{
3151 uint64_t val;
3152 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3153 VBOX_CHECK_ADDR(SrcGCPhys);
3154 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3155 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3156#ifdef VBOX_DEBUG_PHYS
3157 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3158#endif
3159 return val;
3160}
3161
3162/**
3163 * Read guest RAM and ROM, signed 64-bit.
3164 *
3165 * @param SrcGCPhys The source address (guest physical).
3166 */
3167int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3168{
3169 int64_t val;
3170 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3171 VBOX_CHECK_ADDR(SrcGCPhys);
3172 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3173 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3174#ifdef VBOX_DEBUG_PHYS
3175 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3176#endif
3177 return val;
3178}
3179
3180
3181/**
3182 * Write guest RAM.
3183 *
3184 * @param DstGCPhys The destination address (guest physical).
3185 * @param pvSrc The source address.
3186 * @param cb Number of bytes to write
3187 */
3188void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3189{
3190 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3191 VBOX_CHECK_ADDR(DstGCPhys);
3192 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3193 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3194#ifdef VBOX_DEBUG_PHYS
3195 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3196#endif
3197}
3198
3199
3200/**
3201 * Write guest RAM, unsigned 8-bit.
3202 *
3203 * @param DstGCPhys The destination address (guest physical).
3204 * @param val Value
3205 */
3206void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3207{
3208 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3209 VBOX_CHECK_ADDR(DstGCPhys);
3210 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3211 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3212#ifdef VBOX_DEBUG_PHYS
3213 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3214#endif
3215}
3216
3217
3218/**
3219 * Write guest RAM, unsigned 8-bit.
3220 *
3221 * @param DstGCPhys The destination address (guest physical).
3222 * @param val Value
3223 */
3224void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3225{
3226 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3227 VBOX_CHECK_ADDR(DstGCPhys);
3228 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3229 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3230#ifdef VBOX_DEBUG_PHYS
3231 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3232#endif
3233}
3234
3235
3236/**
3237 * Write guest RAM, unsigned 32-bit.
3238 *
3239 * @param DstGCPhys The destination address (guest physical).
3240 * @param val Value
3241 */
3242void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3243{
3244 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3245 VBOX_CHECK_ADDR(DstGCPhys);
3246 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3247 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3248#ifdef VBOX_DEBUG_PHYS
3249 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3250#endif
3251}
3252
3253
3254/**
3255 * Write guest RAM, unsigned 64-bit.
3256 *
3257 * @param DstGCPhys The destination address (guest physical).
3258 * @param val Value
3259 */
3260void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3261{
3262 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3263 VBOX_CHECK_ADDR(DstGCPhys);
3264 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3265 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3266#ifdef VBOX_DEBUG_PHYS
3267 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3268#endif
3269}
3270
3271#undef LOG_GROUP
3272#define LOG_GROUP LOG_GROUP_REM_MMIO
3273
3274/** Read MMIO memory. */
3275static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3276{
3277 uint32_t u32 = 0;
3278 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3279 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3280 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3281 return u32;
3282}
3283
3284/** Read MMIO memory. */
3285static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3286{
3287 uint32_t u32 = 0;
3288 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3289 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3290 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3291 return u32;
3292}
3293
3294/** Read MMIO memory. */
3295static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3296{
3297 uint32_t u32 = 0;
3298 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3299 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3300 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3301 return u32;
3302}
3303
3304/** Write to MMIO memory. */
3305static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3306{
3307 int rc;
3308 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3309 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
3310 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3311}
3312
3313/** Write to MMIO memory. */
3314static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3315{
3316 int rc;
3317 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3318 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
3319 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3320}
3321
3322/** Write to MMIO memory. */
3323static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3324{
3325 int rc;
3326 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
3327 rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
3328 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3329}
3330
3331
3332#undef LOG_GROUP
3333#define LOG_GROUP LOG_GROUP_REM_HANDLER
3334
3335/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3336
3337static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3338{
3339 uint8_t u8;
3340 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3341 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3342 return u8;
3343}
3344
3345static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3346{
3347 uint16_t u16;
3348 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3349 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3350 return u16;
3351}
3352
3353static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3354{
3355 uint32_t u32;
3356 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3357 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3358 return u32;
3359}
3360
/** Handler-memory write, 8-bit: forwards to a plain PGM physical write.
 * NOTE(review): writes the first byte of u32 in memory order - assumes a
 * little-endian host so that byte is the low byte; confirm for other hosts. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3366
/** Handler-memory write, 16-bit: forwards to a plain PGM physical write.
 * NOTE(review): writes the first two bytes of u32 in memory order - assumes a
 * little-endian host; confirm for other hosts. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3372
/** Handler-memory write, 32-bit: forwards to a plain PGM physical write. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3378
3379/* -+- disassembly -+- */
3380
3381#undef LOG_GROUP
3382#define LOG_GROUP LOG_GROUP_REM_DISAS
3383
3384
3385/**
3386 * Enables or disables singled stepped disassembly.
3387 *
3388 * @returns VBox status code.
3389 * @param pVM VM handle.
3390 * @param fEnable To enable set this flag, to disable clear it.
3391 */
3392static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3393{
3394 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3395 VM_ASSERT_EMT(pVM);
3396
3397 if (fEnable)
3398 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3399 else
3400 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3401 return VINF_SUCCESS;
3402}
3403
3404
3405/**
3406 * Enables or disables singled stepped disassembly.
3407 *
3408 * @returns VBox status code.
3409 * @param pVM VM handle.
3410 * @param fEnable To enable set this flag, to disable clear it.
3411 */
3412REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3413{
3414 PVMREQ pReq;
3415 int rc;
3416
3417 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3418 if (VM_IS_EMT(pVM))
3419 return remR3DisasEnableStepping(pVM, fEnable);
3420
3421 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3422 AssertRC(rc);
3423 if (RT_SUCCESS(rc))
3424 rc = pReq->iStatus;
3425 VMR3ReqFree(pReq);
3426 return rc;
3427}
3428
3429
3430#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3431/**
3432 * External Debugger Command: .remstep [on|off|1|0]
3433 */
3434static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3435{
3436 bool fEnable;
3437 int rc;
3438
3439 /* print status */
3440 if (cArgs == 0)
3441 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3442 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3443
3444 /* convert the argument and change the mode. */
3445 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3446 if (RT_FAILURE(rc))
3447 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3448 rc = REMR3DisasEnableStepping(pVM, fEnable);
3449 if (RT_FAILURE(rc))
3450 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3451 return rc;
3452}
3453#endif
3454
3455
3456/**
3457 * Disassembles n instructions and prints them to the log.
3458 *
3459 * @returns Success indicator.
3460 * @param env Pointer to the recompiler CPU structure.
3461 * @param f32BitCode Indicates that whether or not the code should
3462 * be disassembled as 16 or 32 bit. If -1 the CS
3463 * selector will be inspected.
3464 * @param nrInstructions Nr of instructions to disassemble
3465 * @param pszPrefix
3466 * @remark not currently used for anything but ad-hoc debugging.
3467 */
3468bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
3469{
3470 int i, rc;
3471 RTGCPTR GCPtrPC;
3472 uint8_t *pvPC;
3473 RTINTPTR off;
3474 DISCPUSTATE Cpu;
3475
3476 /*
3477 * Determin 16/32 bit mode.
3478 */
3479 if (f32BitCode == -1)
3480 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3481
3482 /*
3483 * Convert cs:eip to host context address.
3484 * We don't care to much about cross page correctness presently.
3485 */
3486 GCPtrPC = env->segs[R_CS].base + env->eip;
3487 if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3488 {
3489 Assert(PGMGetGuestMode(env->pVM) < PGMMODE_AMD64);
3490
3491 /* convert eip to physical address. */
3492 rc = PGMPhysGCPtr2R3PtrByGstCR3(env->pVM,
3493 GCPtrPC,
3494 env->cr[3],
3495 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
3496 (void**)&pvPC);
3497 if (RT_FAILURE(rc))
3498 {
3499 if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
3500 return false;
3501 pvPC = (uint8_t *)PATMR3QueryPatchMemHC(env->pVM, NULL)
3502 + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
3503 }
3504 }
3505 else
3506 {
3507 /* physical address */
3508 rc = PGMPhysGCPhys2R3Ptr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16,
3509 (void**)&pvPC);
3510 if (RT_FAILURE(rc))
3511 return false;
3512 }
3513
3514 /*
3515 * Disassemble.
3516 */
3517 off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
3518 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3519 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3520 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3521 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3522 //Cpu.dwUserData[2] = GCPtrPC;
3523
3524 for (i=0;i<nrInstructions;i++)
3525 {
3526 char szOutput[256];
3527 uint32_t cbOp;
3528 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3529 return false;
3530 if (pszPrefix)
3531 Log(("%s: %s", pszPrefix, szOutput));
3532 else
3533 Log(("%s", szOutput));
3534
3535 pvPC += cbOp;
3536 }
3537 return true;
3538}
3539
3540
3541/** @todo need to test the new code, using the old code in the mean while. */
3542#define USE_OLD_DUMP_AND_DISASSEMBLY
3543
3544/**
3545 * Disassembles one instruction and prints it to the log.
3546 *
3547 * @returns Success indicator.
3548 * @param env Pointer to the recompiler CPU structure.
3549 * @param f32BitCode Indicates that whether or not the code should
3550 * be disassembled as 16 or 32 bit. If -1 the CS
3551 * selector will be inspected.
3552 * @param pszPrefix
3553 */
3554bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3555{
3556#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
3557 PVM pVM = env->pVM;
3558 RTGCPTR GCPtrPC;
3559 uint8_t *pvPC;
3560 char szOutput[256];
3561 uint32_t cbOp;
3562 RTINTPTR off;
3563 DISCPUSTATE Cpu;
3564
3565
3566 /* Doesn't work in long mode. */
3567 if (env->hflags & HF_LMA_MASK)
3568 return false;
3569
3570 /*
3571 * Determin 16/32 bit mode.
3572 */
3573 if (f32BitCode == -1)
3574 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3575
3576 /*
3577 * Log registers
3578 */
3579 if (LogIs2Enabled())
3580 {
3581 remR3StateUpdate(pVM);
3582 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3583 }
3584
3585 /*
3586 * Convert cs:eip to host context address.
3587 * We don't care to much about cross page correctness presently.
3588 */
3589 GCPtrPC = env->segs[R_CS].base + env->eip;
3590 if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3591 {
3592 /* convert eip to physical address. */
3593 int rc = PGMPhysGCPtr2R3PtrByGstCR3(pVM,
3594 GCPtrPC,
3595 env->cr[3],
3596 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
3597 (void**)&pvPC);
3598 if (RT_FAILURE(rc))
3599 {
3600 if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
3601 return false;
3602 pvPC = (uint8_t *)PATMR3QueryPatchMemHC(pVM, NULL)
3603 + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
3604 }
3605 }
3606 else
3607 {
3608
3609 /* physical address */
3610 int rc = PGMPhysGCPhys2R3Ptr(pVM, (RTGCPHYS)GCPtrPC, 16, (void**)&pvPC);
3611 if (RT_FAILURE(rc))
3612 return false;
3613 }
3614
3615 /*
3616 * Disassemble.
3617 */
3618 off = env->eip - (RTGCUINTPTR)(uintptr_t)pvPC;
3619 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3620 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3621 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3622 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3623 //Cpu.dwUserData[2] = GCPtrPC;
3624 if (RT_FAILURE(DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0])))
3625 return false;
3626
3627 if (!f32BitCode)
3628 {
3629 if (pszPrefix)
3630 Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
3631 else
3632 Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
3633 }
3634 else
3635 {
3636 if (pszPrefix)
3637 Log(("%s: %s", pszPrefix, szOutput));
3638 else
3639 Log(("%s", szOutput));
3640 }
3641 return true;
3642
3643#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
3644 PVM pVM = env->pVM;
3645 const bool fLog = LogIsEnabled();
3646 const bool fLog2 = LogIs2Enabled();
3647 int rc = VINF_SUCCESS;
3648
3649 /*
3650 * Don't bother if there ain't any log output to do.
3651 */
3652 if (!fLog && !fLog2)
3653 return true;
3654
3655 /*
3656 * Update the state so DBGF reads the correct register values.
3657 */
3658 remR3StateUpdate(pVM);
3659
3660 /*
3661 * Log registers if requested.
3662 */
3663 if (!fLog2)
3664 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3665
3666 /*
3667 * Disassemble to log.
3668 */
3669 if (fLog)
3670 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3671
3672 return RT_SUCCESS(rc);
3673#endif
3674}
3675
3676
3677/**
3678 * Disassemble recompiled code.
3679 *
3680 * @param phFileIgnored Ignored, logfile usually.
3681 * @param pvCode Pointer to the code block.
3682 * @param cb Size of the code block.
3683 */
3684void disas(FILE *phFile, void *pvCode, unsigned long cb)
3685{
3686#ifdef DEBUG_TMP_LOGGING
3687# define DISAS_PRINTF(x...) fprintf(phFile, x)
3688#else
3689# define DISAS_PRINTF(x...) RTLogPrintf(x)
3690 if (LogIs2Enabled())
3691#endif
3692 {
3693 unsigned off = 0;
3694 char szOutput[256];
3695 DISCPUSTATE Cpu;
3696
3697 memset(&Cpu, 0, sizeof(Cpu));
3698#ifdef RT_ARCH_X86
3699 Cpu.mode = CPUMODE_32BIT;
3700#else
3701 Cpu.mode = CPUMODE_64BIT;
3702#endif
3703
3704 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3705 while (off < cb)
3706 {
3707 uint32_t cbInstr;
3708 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3709 DISAS_PRINTF("%s", szOutput);
3710 else
3711 {
3712 DISAS_PRINTF("disas error\n");
3713 cbInstr = 1;
3714#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3715 break;
3716#endif
3717 }
3718 off += cbInstr;
3719 }
3720 }
3721
3722#undef DISAS_PRINTF
3723}
3724
3725
3726/**
3727 * Disassemble guest code.
3728 *
3729 * @param phFileIgnored Ignored, logfile usually.
3730 * @param uCode The guest address of the code to disassemble. (flat?)
3731 * @param cb Number of bytes to disassemble.
3732 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3733 */
3734void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3735{
3736#ifdef DEBUG_TMP_LOGGING
3737# define DISAS_PRINTF(x...) fprintf(phFile, x)
3738#else
3739# define DISAS_PRINTF(x...) RTLogPrintf(x)
3740 if (LogIs2Enabled())
3741#endif
3742 {
3743 PVM pVM = cpu_single_env->pVM;
3744 RTSEL cs;
3745 RTGCUINTPTR eip;
3746
3747 /*
3748 * Update the state so DBGF reads the correct register values (flags).
3749 */
3750 remR3StateUpdate(pVM);
3751
3752 /*
3753 * Do the disassembling.
3754 */
3755 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3756 cs = cpu_single_env->segs[R_CS].selector;
3757 eip = uCode - cpu_single_env->segs[R_CS].base;
3758 for (;;)
3759 {
3760 char szBuf[256];
3761 uint32_t cbInstr;
3762 int rc = DBGFR3DisasInstrEx(pVM,
3763 cs,
3764 eip,
3765 0,
3766 szBuf, sizeof(szBuf),
3767 &cbInstr);
3768 if (RT_SUCCESS(rc))
3769 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3770 else
3771 {
3772 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3773 cbInstr = 1;
3774 }
3775
3776 /* next */
3777 if (cb <= cbInstr)
3778 break;
3779 cb -= cbInstr;
3780 uCode += cbInstr;
3781 eip += cbInstr;
3782 }
3783 }
3784#undef DISAS_PRINTF
3785}
3786
3787
3788/**
3789 * Looks up a guest symbol.
3790 *
3791 * @returns Pointer to symbol name. This is a static buffer.
3792 * @param orig_addr The address in question.
3793 */
3794const char *lookup_symbol(target_ulong orig_addr)
3795{
3796 RTGCINTPTR off = 0;
3797 DBGFSYMBOL Sym;
3798 PVM pVM = cpu_single_env->pVM;
3799 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3800 if (RT_SUCCESS(rc))
3801 {
3802 static char szSym[sizeof(Sym.szName) + 48];
3803 if (!off)
3804 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3805 else if (off > 0)
3806 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3807 else
3808 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3809 return szSym;
3810 }
3811 return "<N/A>";
3812}
3813
3814
3815#undef LOG_GROUP
3816#define LOG_GROUP LOG_GROUP_REM
3817
3818
3819/* -+- FF notifications -+- */
3820
3821
3822/**
3823 * Notification about a pending interrupt.
3824 *
3825 * @param pVM VM Handle.
3826 * @param u8Interrupt Interrupt
3827 * @thread The emulation thread.
3828 */
3829REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3830{
3831 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3832 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3833}
3834
3835/**
3836 * Notification about a pending interrupt.
3837 *
3838 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3839 * @param pVM VM Handle.
3840 * @thread The emulation thread.
3841 */
3842REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3843{
3844 return pVM->rem.s.u32PendingInterrupt;
3845}
3846
3847/**
3848 * Notification about the interrupt FF being set.
3849 *
3850 * @param pVM VM Handle.
3851 * @thread The emulation thread.
3852 */
3853REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3854{
3855 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3856 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3857 if (pVM->rem.s.fInREM)
3858 {
3859 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3860 CPU_INTERRUPT_EXTERNAL_HARD);
3861 }
3862}
3863
3864
3865/**
3866 * Notification about the interrupt FF being set.
3867 *
3868 * @param pVM VM Handle.
3869 * @thread Any.
3870 */
3871REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3872{
3873 LogFlow(("REMR3NotifyInterruptClear:\n"));
3874 if (pVM->rem.s.fInREM)
3875 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3876}
3877
3878
3879/**
3880 * Notification about pending timer(s).
3881 *
3882 * @param pVM VM Handle.
3883 * @thread Any.
3884 */
3885REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3886{
3887#ifndef DEBUG_bird
3888 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3889#endif
3890 if (pVM->rem.s.fInREM)
3891 {
3892 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3893 CPU_INTERRUPT_EXTERNAL_TIMER);
3894 }
3895}
3896
3897
3898/**
3899 * Notification about pending DMA transfers.
3900 *
3901 * @param pVM VM Handle.
3902 * @thread Any.
3903 */
3904REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3905{
3906 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3907 if (pVM->rem.s.fInREM)
3908 {
3909 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3910 CPU_INTERRUPT_EXTERNAL_DMA);
3911 }
3912}
3913
3914
3915/**
3916 * Notification about pending timer(s).
3917 *
3918 * @param pVM VM Handle.
3919 * @thread Any.
3920 */
3921REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3922{
3923 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3924 if (pVM->rem.s.fInREM)
3925 {
3926 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3927 CPU_INTERRUPT_EXTERNAL_EXIT);
3928 }
3929}
3930
3931
3932/**
3933 * Notification about pending FF set by an external thread.
3934 *
3935 * @param pVM VM handle.
3936 * @thread Any.
3937 */
3938REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3939{
3940 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3941 if (pVM->rem.s.fInREM)
3942 {
3943 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3944 CPU_INTERRUPT_EXTERNAL_EXIT);
3945 }
3946}
3947
3948
3949#ifdef VBOX_WITH_STATISTICS
3950void remR3ProfileStart(int statcode)
3951{
3952 STAMPROFILEADV *pStat;
3953 switch(statcode)
3954 {
3955 case STATS_EMULATE_SINGLE_INSTR:
3956 pStat = &gStatExecuteSingleInstr;
3957 break;
3958 case STATS_QEMU_COMPILATION:
3959 pStat = &gStatCompilationQEmu;
3960 break;
3961 case STATS_QEMU_RUN_EMULATED_CODE:
3962 pStat = &gStatRunCodeQEmu;
3963 break;
3964 case STATS_QEMU_TOTAL:
3965 pStat = &gStatTotalTimeQEmu;
3966 break;
3967 case STATS_QEMU_RUN_TIMERS:
3968 pStat = &gStatTimers;
3969 break;
3970 case STATS_TLB_LOOKUP:
3971 pStat= &gStatTBLookup;
3972 break;
3973 case STATS_IRQ_HANDLING:
3974 pStat= &gStatIRQ;
3975 break;
3976 case STATS_RAW_CHECK:
3977 pStat = &gStatRawCheck;
3978 break;
3979
3980 default:
3981 AssertMsgFailed(("unknown stat %d\n", statcode));
3982 return;
3983 }
3984 STAM_PROFILE_ADV_START(pStat, a);
3985}
3986
3987
3988void remR3ProfileStop(int statcode)
3989{
3990 STAMPROFILEADV *pStat;
3991 switch(statcode)
3992 {
3993 case STATS_EMULATE_SINGLE_INSTR:
3994 pStat = &gStatExecuteSingleInstr;
3995 break;
3996 case STATS_QEMU_COMPILATION:
3997 pStat = &gStatCompilationQEmu;
3998 break;
3999 case STATS_QEMU_RUN_EMULATED_CODE:
4000 pStat = &gStatRunCodeQEmu;
4001 break;
4002 case STATS_QEMU_TOTAL:
4003 pStat = &gStatTotalTimeQEmu;
4004 break;
4005 case STATS_QEMU_RUN_TIMERS:
4006 pStat = &gStatTimers;
4007 break;
4008 case STATS_TLB_LOOKUP:
4009 pStat= &gStatTBLookup;
4010 break;
4011 case STATS_IRQ_HANDLING:
4012 pStat= &gStatIRQ;
4013 break;
4014 case STATS_RAW_CHECK:
4015 pStat = &gStatRawCheck;
4016 break;
4017 default:
4018 AssertMsgFailed(("unknown stat %d\n", statcode));
4019 return;
4020 }
4021 STAM_PROFILE_ADV_STOP(pStat, a);
4022}
4023#endif
4024
4025/**
4026 * Raise an RC, force rem exit.
4027 *
4028 * @param pVM VM handle.
4029 * @param rc The rc.
4030 */
4031void remR3RaiseRC(PVM pVM, int rc)
4032{
4033 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4034 Assert(pVM->rem.s.fInREM);
4035 VM_ASSERT_EMT(pVM);
4036 pVM->rem.s.rc = rc;
4037 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4038}
4039
4040
4041/* -+- timers -+- */
4042
4043uint64_t cpu_get_tsc(CPUX86State *env)
4044{
4045 STAM_COUNTER_INC(&gStatCpuGetTSC);
4046 return TMCpuTickGet(env->pVM);
4047}
4048
4049
4050/* -+- interrupts -+- */
4051
4052void cpu_set_ferr(CPUX86State *env)
4053{
4054 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
4055 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4056}
4057
/**
 * Fetches the next pending external interrupt vector for the recompiler.
 *
 * @returns The interrupt vector (0..255) on success, -1 when none is pending.
 * @param   env     Pointer to the recompiler CPU structure.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the vector stashed by the raw-mode fallback path; it is
           one-shot, so clear it immediately after reading. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVM, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts may still be pending; keep the hard-interrupt
           request flag set so the recompiler asks again. */
        if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    /* Nothing deliverable right now. */
    return -1;
}
4090
4091
4092/* -+- local apic -+- */
4093
4094void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4095{
4096 int rc = PDMApicSetBase(env->pVM, val);
4097 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4098}
4099
4100uint64_t cpu_get_apic_base(CPUX86State *env)
4101{
4102 uint64_t u64;
4103 int rc = PDMApicGetBase(env->pVM, &u64);
4104 if (RT_SUCCESS(rc))
4105 {
4106 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4107 return u64;
4108 }
4109 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4110 return 0;
4111}
4112
4113void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4114{
4115 int rc = PDMApicSetTPR(env->pVM, val);
4116 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4117}
4118
4119uint8_t cpu_get_apic_tpr(CPUX86State *env)
4120{
4121 uint8_t u8;
4122 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4123 if (RT_SUCCESS(rc))
4124 {
4125 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4126 return u8;
4127 }
4128 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4129 return 0;
4130}
4131
4132
4133uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4134{
4135 uint64_t value;
4136 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4137 if (RT_SUCCESS(rc))
4138 {
4139 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4140 return value;
4141 }
4142 /** @todo: exception ? */
4143 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4144 return value;
4145}
4146
4147void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
4148{
4149 int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
4150 /** @todo: exception if error ? */
4151 LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
4152}
4153
4154uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
4155{
4156 return CPUMGetGuestMsr(env->pVM, msr);
4157}
4158
4159void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
4160{
4161 CPUMSetGuestMsr(env->pVM, msr, val);
4162}
4163/* -+- I/O Ports -+- */
4164
4165#undef LOG_GROUP
4166#define LOG_GROUP LOG_GROUP_REM_IOPORT
4167
4168void cpu_outb(CPUState *env, int addr, int val)
4169{
4170 int rc;
4171
4172 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4173 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4174
4175 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4176 if (RT_LIKELY(rc == VINF_SUCCESS))
4177 return;
4178 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4179 {
4180 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4181 remR3RaiseRC(env->pVM, rc);
4182 return;
4183 }
4184 remAbort(rc, __FUNCTION__);
4185}
4186
4187void cpu_outw(CPUState *env, int addr, int val)
4188{
4189 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4190 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4191 if (RT_LIKELY(rc == VINF_SUCCESS))
4192 return;
4193 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4194 {
4195 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4196 remR3RaiseRC(env->pVM, rc);
4197 return;
4198 }
4199 remAbort(rc, __FUNCTION__);
4200}
4201
4202void cpu_outl(CPUState *env, int addr, int val)
4203{
4204 int rc;
4205 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4206 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4207 if (RT_LIKELY(rc == VINF_SUCCESS))
4208 return;
4209 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4210 {
4211 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4212 remR3RaiseRC(env->pVM, rc);
4213 return;
4214 }
4215 remAbort(rc, __FUNCTION__);
4216}
4217
4218int cpu_inb(CPUState *env, int addr)
4219{
4220 uint32_t u32 = 0;
4221 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4222 if (RT_LIKELY(rc == VINF_SUCCESS))
4223 {
4224 if (/*addr != 0x61 && */addr != 0x71)
4225 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4226 return (int)u32;
4227 }
4228 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4229 {
4230 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4231 remR3RaiseRC(env->pVM, rc);
4232 return (int)u32;
4233 }
4234 remAbort(rc, __FUNCTION__);
4235 return 0xff;
4236}
4237
4238int cpu_inw(CPUState *env, int addr)
4239{
4240 uint32_t u32 = 0;
4241 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4242 if (RT_LIKELY(rc == VINF_SUCCESS))
4243 {
4244 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4245 return (int)u32;
4246 }
4247 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4248 {
4249 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4250 remR3RaiseRC(env->pVM, rc);
4251 return (int)u32;
4252 }
4253 remAbort(rc, __FUNCTION__);
4254 return 0xffff;
4255}
4256
4257int cpu_inl(CPUState *env, int addr)
4258{
4259 uint32_t u32 = 0;
4260 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4261 if (RT_LIKELY(rc == VINF_SUCCESS))
4262 {
4263//if (addr==0x01f0 && u32 == 0x6b6d)
4264// loglevel = ~0;
4265 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4266 return (int)u32;
4267 }
4268 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4269 {
4270 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4271 remR3RaiseRC(env->pVM, rc);
4272 return (int)u32;
4273 }
4274 remAbort(rc, __FUNCTION__);
4275 return 0xffffffff;
4276}
4277
4278#undef LOG_GROUP
4279#define LOG_GROUP LOG_GROUP_REM
4280
4281
4282/* -+- helpers and misc other interfaces -+- */
4283
4284/**
4285 * Perform the CPUID instruction.
4286 *
4287 * ASMCpuId cannot be invoked from some source files where this is used because of global
4288 * register allocations.
4289 *
4290 * @param env Pointer to the recompiler CPU structure.
4291 * @param uOperator CPUID operation (eax).
4292 * @param pvEAX Where to store eax.
4293 * @param pvEBX Where to store ebx.
4294 * @param pvECX Where to store ecx.
4295 * @param pvEDX Where to store edx.
4296 */
4297void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4298{
4299 CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4300}
4301
4302
4303#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): this definition is compiled out (#if 0 above); kept for
 * reference only.  It logs the error, then syncs the REM state back and
 * hands control to EM's fatal-error handling, which is not expected to
 * return.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    /* Only the raw format string is shown here; the args were consumed above. */
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4331#endif
4332
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Logs the formatted message, asserts, then syncs the REM state back and
 * hands control to EM's fatal-error handling (not expected to return).
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list args;
    PVM pVM;

    /*
     * Bitch about it.
     */
#ifndef _MSC_VER
    /** @todo: MSVC is right - it's not valid C */
    RTLogFlags(NULL, "nodisabled nobuffered");
#endif
    /* The %N directive consumes the va_list, so it is restarted for the
       second use below - a va_list cannot be reused after being walked. */
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
    va_end(args);
    va_start(args, pszFormat);
    AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
    va_end(args);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4366
4367
4368/**
4369 * Aborts the VM.
4370 *
4371 * @param rc VBox error code.
4372 * @param pszTip Hint about why/when this happend.
4373 */
4374void remAbort(int rc, const char *pszTip)
4375{
4376 PVM pVM;
4377
4378 /*
4379 * Bitch about it.
4380 */
4381 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4382 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4383
4384 /*
4385 * Jump back to where we entered the recompiler.
4386 */
4387 pVM = cpu_single_env->pVM;
4388 if (pVM->rem.s.fInREM)
4389 REMR3StateBack(pVM);
4390 EMR3FatalError(pVM, rc);
4391 AssertMsgFailed(("EMR3FatalError returned!\n"));
4392}
4393
4394
4395/**
4396 * Dumps a linux system call.
4397 * @param pVM VM handle.
4398 */
4399void remR3DumpLnxSyscall(PVM pVM)
4400{
4401 static const char *apsz[] =
4402 {
4403 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4404 "sys_exit",
4405 "sys_fork",
4406 "sys_read",
4407 "sys_write",
4408 "sys_open", /* 5 */
4409 "sys_close",
4410 "sys_waitpid",
4411 "sys_creat",
4412 "sys_link",
4413 "sys_unlink", /* 10 */
4414 "sys_execve",
4415 "sys_chdir",
4416 "sys_time",
4417 "sys_mknod",
4418 "sys_chmod", /* 15 */
4419 "sys_lchown16",
4420 "sys_ni_syscall", /* old break syscall holder */
4421 "sys_stat",
4422 "sys_lseek",
4423 "sys_getpid", /* 20 */
4424 "sys_mount",
4425 "sys_oldumount",
4426 "sys_setuid16",
4427 "sys_getuid16",
4428 "sys_stime", /* 25 */
4429 "sys_ptrace",
4430 "sys_alarm",
4431 "sys_fstat",
4432 "sys_pause",
4433 "sys_utime", /* 30 */
4434 "sys_ni_syscall", /* old stty syscall holder */
4435 "sys_ni_syscall", /* old gtty syscall holder */
4436 "sys_access",
4437 "sys_nice",
4438 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4439 "sys_sync",
4440 "sys_kill",
4441 "sys_rename",
4442 "sys_mkdir",
4443 "sys_rmdir", /* 40 */
4444 "sys_dup",
4445 "sys_pipe",
4446 "sys_times",
4447 "sys_ni_syscall", /* old prof syscall holder */
4448 "sys_brk", /* 45 */
4449 "sys_setgid16",
4450 "sys_getgid16",
4451 "sys_signal",
4452 "sys_geteuid16",
4453 "sys_getegid16", /* 50 */
4454 "sys_acct",
4455 "sys_umount", /* recycled never used phys() */
4456 "sys_ni_syscall", /* old lock syscall holder */
4457 "sys_ioctl",
4458 "sys_fcntl", /* 55 */
4459 "sys_ni_syscall", /* old mpx syscall holder */
4460 "sys_setpgid",
4461 "sys_ni_syscall", /* old ulimit syscall holder */
4462 "sys_olduname",
4463 "sys_umask", /* 60 */
4464 "sys_chroot",
4465 "sys_ustat",
4466 "sys_dup2",
4467 "sys_getppid",
4468 "sys_getpgrp", /* 65 */
4469 "sys_setsid",
4470 "sys_sigaction",
4471 "sys_sgetmask",
4472 "sys_ssetmask",
4473 "sys_setreuid16", /* 70 */
4474 "sys_setregid16",
4475 "sys_sigsuspend",
4476 "sys_sigpending",
4477 "sys_sethostname",
4478 "sys_setrlimit", /* 75 */
4479 "sys_old_getrlimit",
4480 "sys_getrusage",
4481 "sys_gettimeofday",
4482 "sys_settimeofday",
4483 "sys_getgroups16", /* 80 */
4484 "sys_setgroups16",
4485 "old_select",
4486 "sys_symlink",
4487 "sys_lstat",
4488 "sys_readlink", /* 85 */
4489 "sys_uselib",
4490 "sys_swapon",
4491 "sys_reboot",
4492 "old_readdir",
4493 "old_mmap", /* 90 */
4494 "sys_munmap",
4495 "sys_truncate",
4496 "sys_ftruncate",
4497 "sys_fchmod",
4498 "sys_fchown16", /* 95 */
4499 "sys_getpriority",
4500 "sys_setpriority",
4501 "sys_ni_syscall", /* old profil syscall holder */
4502 "sys_statfs",
4503 "sys_fstatfs", /* 100 */
4504 "sys_ioperm",
4505 "sys_socketcall",
4506 "sys_syslog",
4507 "sys_setitimer",
4508 "sys_getitimer", /* 105 */
4509 "sys_newstat",
4510 "sys_newlstat",
4511 "sys_newfstat",
4512 "sys_uname",
4513 "sys_iopl", /* 110 */
4514 "sys_vhangup",
4515 "sys_ni_syscall", /* old "idle" system call */
4516 "sys_vm86old",
4517 "sys_wait4",
4518 "sys_swapoff", /* 115 */
4519 "sys_sysinfo",
4520 "sys_ipc",
4521 "sys_fsync",
4522 "sys_sigreturn",
4523 "sys_clone", /* 120 */
4524 "sys_setdomainname",
4525 "sys_newuname",
4526 "sys_modify_ldt",
4527 "sys_adjtimex",
4528 "sys_mprotect", /* 125 */
4529 "sys_sigprocmask",
4530 "sys_ni_syscall", /* old "create_module" */
4531 "sys_init_module",
4532 "sys_delete_module",
4533 "sys_ni_syscall", /* 130: old "get_kernel_syms" */
4534 "sys_quotactl",
4535 "sys_getpgid",
4536 "sys_fchdir",
4537 "sys_bdflush",
4538 "sys_sysfs", /* 135 */
4539 "sys_personality",
4540 "sys_ni_syscall", /* reserved for afs_syscall */
4541 "sys_setfsuid16",
4542 "sys_setfsgid16",
4543 "sys_llseek", /* 140 */
4544 "sys_getdents",
4545 "sys_select",
4546 "sys_flock",
4547 "sys_msync",
4548 "sys_readv", /* 145 */
4549 "sys_writev",
4550 "sys_getsid",
4551 "sys_fdatasync",
4552 "sys_sysctl",
4553 "sys_mlock", /* 150 */
4554 "sys_munlock",
4555 "sys_mlockall",
4556 "sys_munlockall",
4557 "sys_sched_setparam",
4558 "sys_sched_getparam", /* 155 */
4559 "sys_sched_setscheduler",
4560 "sys_sched_getscheduler",
4561 "sys_sched_yield",
4562 "sys_sched_get_priority_max",
4563 "sys_sched_get_priority_min", /* 160 */
4564 "sys_sched_rr_get_interval",
4565 "sys_nanosleep",
4566 "sys_mremap",
4567 "sys_setresuid16",
4568 "sys_getresuid16", /* 165 */
4569 "sys_vm86",
4570 "sys_ni_syscall", /* Old sys_query_module */
4571 "sys_poll",
4572 "sys_nfsservctl",
4573 "sys_setresgid16", /* 170 */
4574 "sys_getresgid16",
4575 "sys_prctl",
4576 "sys_rt_sigreturn",
4577 "sys_rt_sigaction",
4578 "sys_rt_sigprocmask", /* 175 */
4579 "sys_rt_sigpending",
4580 "sys_rt_sigtimedwait",
4581 "sys_rt_sigqueueinfo",
4582 "sys_rt_sigsuspend",
4583 "sys_pread64", /* 180 */
4584 "sys_pwrite64",
4585 "sys_chown16",
4586 "sys_getcwd",
4587 "sys_capget",
4588 "sys_capset", /* 185 */
4589 "sys_sigaltstack",
4590 "sys_sendfile",
4591 "sys_ni_syscall", /* reserved for streams1 */
4592 "sys_ni_syscall", /* reserved for streams2 */
4593 "sys_vfork", /* 190 */
4594 "sys_getrlimit",
4595 "sys_mmap2",
4596 "sys_truncate64",
4597 "sys_ftruncate64",
4598 "sys_stat64", /* 195 */
4599 "sys_lstat64",
4600 "sys_fstat64",
4601 "sys_lchown",
4602 "sys_getuid",
4603 "sys_getgid", /* 200 */
4604 "sys_geteuid",
4605 "sys_getegid",
4606 "sys_setreuid",
4607 "sys_setregid",
4608 "sys_getgroups", /* 205 */
4609 "sys_setgroups",
4610 "sys_fchown",
4611 "sys_setresuid",
4612 "sys_getresuid",
4613 "sys_setresgid", /* 210 */
4614 "sys_getresgid",
4615 "sys_chown",
4616 "sys_setuid",
4617 "sys_setgid",
4618 "sys_setfsuid", /* 215 */
4619 "sys_setfsgid",
4620 "sys_pivot_root",
4621 "sys_mincore",
4622 "sys_madvise",
4623 "sys_getdents64", /* 220 */
4624 "sys_fcntl64",
4625 "sys_ni_syscall", /* reserved for TUX */
4626 "sys_ni_syscall",
4627 "sys_gettid",
4628 "sys_readahead", /* 225 */
4629 "sys_setxattr",
4630 "sys_lsetxattr",
4631 "sys_fsetxattr",
4632 "sys_getxattr",
4633 "sys_lgetxattr", /* 230 */
4634 "sys_fgetxattr",
4635 "sys_listxattr",
4636 "sys_llistxattr",
4637 "sys_flistxattr",
4638 "sys_removexattr", /* 235 */
4639 "sys_lremovexattr",
4640 "sys_fremovexattr",
4641 "sys_tkill",
4642 "sys_sendfile64",
4643 "sys_futex", /* 240 */
4644 "sys_sched_setaffinity",
4645 "sys_sched_getaffinity",
4646 "sys_set_thread_area",
4647 "sys_get_thread_area",
4648 "sys_io_setup", /* 245 */
4649 "sys_io_destroy",
4650 "sys_io_getevents",
4651 "sys_io_submit",
4652 "sys_io_cancel",
4653 "sys_fadvise64", /* 250 */
4654 "sys_ni_syscall",
4655 "sys_exit_group",
4656 "sys_lookup_dcookie",
4657 "sys_epoll_create",
4658 "sys_epoll_ctl", /* 255 */
4659 "sys_epoll_wait",
4660 "sys_remap_file_pages",
4661 "sys_set_tid_address",
4662 "sys_timer_create",
4663 "sys_timer_settime", /* 260 */
4664 "sys_timer_gettime",
4665 "sys_timer_getoverrun",
4666 "sys_timer_delete",
4667 "sys_clock_settime",
4668 "sys_clock_gettime", /* 265 */
4669 "sys_clock_getres",
4670 "sys_clock_nanosleep",
4671 "sys_statfs64",
4672 "sys_fstatfs64",
4673 "sys_tgkill", /* 270 */
4674 "sys_utimes",
4675 "sys_fadvise64_64",
4676 "sys_ni_syscall" /* sys_vserver */
4677 };
4678
4679 uint32_t uEAX = CPUMGetGuestEAX(pVM);
4680 switch (uEAX)
4681 {
4682 default:
4683 if (uEAX < RT_ELEMENTS(apsz))
4684 Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
4685 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
4686 CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
4687 else
4688 Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
4689 break;
4690
4691 }
4692}
4693
4694
4695/**
4696 * Dumps an OpenBSD system call.
4697 * @param pVM VM handle.
4698 */
4699void remR3DumpOBsdSyscall(PVM pVM)
4700{
4701 static const char *apsz[] =
4702 {
4703 "SYS_syscall", //0
4704 "SYS_exit", //1
4705 "SYS_fork", //2
4706 "SYS_read", //3
4707 "SYS_write", //4
4708 "SYS_open", //5
4709 "SYS_close", //6
4710 "SYS_wait4", //7
4711 "SYS_8",
4712 "SYS_link", //9
4713 "SYS_unlink", //10
4714 "SYS_11",
4715 "SYS_chdir", //12
4716 "SYS_fchdir", //13
4717 "SYS_mknod", //14
4718 "SYS_chmod", //15
4719 "SYS_chown", //16
4720 "SYS_break", //17
4721 "SYS_18",
4722 "SYS_19",
4723 "SYS_getpid", //20
4724 "SYS_mount", //21
4725 "SYS_unmount", //22
4726 "SYS_setuid", //23
4727 "SYS_getuid", //24
4728 "SYS_geteuid", //25
4729 "SYS_ptrace", //26
4730 "SYS_recvmsg", //27
4731 "SYS_sendmsg", //28
4732 "SYS_recvfrom", //29
4733 "SYS_accept", //30
4734 "SYS_getpeername", //31
4735 "SYS_getsockname", //32
4736 "SYS_access", //33
4737 "SYS_chflags", //34
4738 "SYS_fchflags", //35
4739 "SYS_sync", //36
4740 "SYS_kill", //37
4741 "SYS_38",
4742 "SYS_getppid", //39
4743 "SYS_40",
4744 "SYS_dup", //41
4745 "SYS_opipe", //42
4746 "SYS_getegid", //43
4747 "SYS_profil", //44
4748 "SYS_ktrace", //45
4749 "SYS_sigaction", //46
4750 "SYS_getgid", //47
4751 "SYS_sigprocmask", //48
4752 "SYS_getlogin", //49
4753 "SYS_setlogin", //50
4754 "SYS_acct", //51
4755 "SYS_sigpending", //52
4756 "SYS_osigaltstack", //53
4757 "SYS_ioctl", //54
4758 "SYS_reboot", //55
4759 "SYS_revoke", //56
4760 "SYS_symlink", //57
4761 "SYS_readlink", //58
4762 "SYS_execve", //59
4763 "SYS_umask", //60
4764 "SYS_chroot", //61
4765 "SYS_62",
4766 "SYS_63",
4767 "SYS_64",
4768 "SYS_65",
4769 "SYS_vfork", //66
4770 "SYS_67",
4771 "SYS_68",
4772 "SYS_sbrk", //69
4773 "SYS_sstk", //70
4774 "SYS_61",
4775 "SYS_vadvise", //72
4776 "SYS_munmap", //73
4777 "SYS_mprotect", //74
4778 "SYS_madvise", //75
4779 "SYS_76",
4780 "SYS_77",
4781 "SYS_mincore", //78
4782 "SYS_getgroups", //79
4783 "SYS_setgroups", //80
4784 "SYS_getpgrp", //81
4785 "SYS_setpgid", //82
4786 "SYS_setitimer", //83
4787 "SYS_84",
4788 "SYS_85",
4789 "SYS_getitimer", //86
4790 "SYS_87",
4791 "SYS_88",
4792 "SYS_89",
4793 "SYS_dup2", //90
4794 "SYS_91",
4795 "SYS_fcntl", //92
4796 "SYS_select", //93
4797 "SYS_94",
4798 "SYS_fsync", //95
4799 "SYS_setpriority", //96
4800 "SYS_socket", //97
4801 "SYS_connect", //98
4802 "SYS_99",
4803 "SYS_getpriority", //100
4804 "SYS_101",
4805 "SYS_102",
4806 "SYS_sigreturn", //103
4807 "SYS_bind", //104
4808 "SYS_setsockopt", //105
4809 "SYS_listen", //106
4810 "SYS_107",
4811 "SYS_108",
4812 "SYS_109",
4813 "SYS_110",
4814 "SYS_sigsuspend", //111
4815 "SYS_112",
4816 "SYS_113",
4817 "SYS_114",
4818 "SYS_115",
4819 "SYS_gettimeofday", //116
4820 "SYS_getrusage", //117
4821 "SYS_getsockopt", //118
4822 "SYS_119",
4823 "SYS_readv", //120
4824 "SYS_writev", //121
4825 "SYS_settimeofday", //122
4826 "SYS_fchown", //123
4827 "SYS_fchmod", //124
4828 "SYS_125",
4829 "SYS_setreuid", //126
4830 "SYS_setregid", //127
4831 "SYS_rename", //128
4832 "SYS_129",
4833 "SYS_130",
4834 "SYS_flock", //131
4835 "SYS_mkfifo", //132
4836 "SYS_sendto", //133
4837 "SYS_shutdown", //134
4838 "SYS_socketpair", //135
4839 "SYS_mkdir", //136
4840 "SYS_rmdir", //137
4841 "SYS_utimes", //138
4842 "SYS_139",
4843 "SYS_adjtime", //140
4844 "SYS_141",
4845 "SYS_142",
4846 "SYS_143",
4847 "SYS_144",
4848 "SYS_145",
4849 "SYS_146",
4850 "SYS_setsid", //147
4851 "SYS_quotactl", //148
4852 "SYS_149",
4853 "SYS_150",
4854 "SYS_151",
4855 "SYS_152",
4856 "SYS_153",
4857 "SYS_154",
4858 "SYS_nfssvc", //155
4859 "SYS_156",
4860 "SYS_157",
4861 "SYS_158",
4862 "SYS_159",
4863 "SYS_160",
4864 "SYS_getfh", //161
4865 "SYS_162",
4866 "SYS_163",
4867 "SYS_164",
4868 "SYS_sysarch", //165
4869 "SYS_166",
4870 "SYS_167",
4871 "SYS_168",
4872 "SYS_169",
4873 "SYS_170",
4874 "SYS_171",
4875 "SYS_172",
4876 "SYS_pread", //173
4877 "SYS_pwrite", //174
4878 "SYS_175",
4879 "SYS_176",
4880 "SYS_177",
4881 "SYS_178",
4882 "SYS_179",
4883 "SYS_180",
4884 "SYS_setgid", //181
4885 "SYS_setegid", //182
4886 "SYS_seteuid", //183
4887 "SYS_lfs_bmapv", //184
4888 "SYS_lfs_markv", //185
4889 "SYS_lfs_segclean", //186
4890 "SYS_lfs_segwait", //187
4891 "SYS_188",
4892 "SYS_189",
4893 "SYS_190",
4894 "SYS_pathconf", //191
4895 "SYS_fpathconf", //192
4896 "SYS_swapctl", //193
4897 "SYS_getrlimit", //194
4898 "SYS_setrlimit", //195
4899 "SYS_getdirentries", //196
4900 "SYS_mmap", //197
4901 "SYS___syscall", //198
4902 "SYS_lseek", //199
4903 "SYS_truncate", //200
4904 "SYS_ftruncate", //201
4905 "SYS___sysctl", //202
4906 "SYS_mlock", //203
4907 "SYS_munlock", //204
4908 "SYS_205",
4909 "SYS_futimes", //206
4910 "SYS_getpgid", //207
4911 "SYS_xfspioctl", //208
4912 "SYS_209",
4913 "SYS_210",
4914 "SYS_211",
4915 "SYS_212",
4916 "SYS_213",
4917 "SYS_214",
4918 "SYS_215",
4919 "SYS_216",
4920 "SYS_217",
4921 "SYS_218",
4922 "SYS_219",
4923 "SYS_220",
4924 "SYS_semget", //221
4925 "SYS_222",
4926 "SYS_223",
4927 "SYS_224",
4928 "SYS_msgget", //225
4929 "SYS_msgsnd", //226
4930 "SYS_msgrcv", //227
4931 "SYS_shmat", //228
4932 "SYS_229",
4933 "SYS_shmdt", //230
4934 "SYS_231",
4935 "SYS_clock_gettime", //232
4936 "SYS_clock_settime", //233
4937 "SYS_clock_getres", //234
4938 "SYS_235",
4939 "SYS_236",
4940 "SYS_237",
4941 "SYS_238",
4942 "SYS_239",
4943 "SYS_nanosleep", //240
4944 "SYS_241",
4945 "SYS_242",
4946 "SYS_243",
4947 "SYS_244",
4948 "SYS_245",
4949 "SYS_246",
4950 "SYS_247",
4951 "SYS_248",
4952 "SYS_249",
4953 "SYS_minherit", //250
4954 "SYS_rfork", //251
4955 "SYS_poll", //252
4956 "SYS_issetugid", //253
4957 "SYS_lchown", //254
4958 "SYS_getsid", //255
4959 "SYS_msync", //256
4960 "SYS_257",
4961 "SYS_258",
4962 "SYS_259",
4963 "SYS_getfsstat", //260
4964 "SYS_statfs", //261
4965 "SYS_fstatfs", //262
4966 "SYS_pipe", //263
4967 "SYS_fhopen", //264
4968 "SYS_265",
4969 "SYS_fhstatfs", //266
4970 "SYS_preadv", //267
4971 "SYS_pwritev", //268
4972 "SYS_kqueue", //269
4973 "SYS_kevent", //270
4974 "SYS_mlockall", //271
4975 "SYS_munlockall", //272
4976 "SYS_getpeereid", //273
4977 "SYS_274",
4978 "SYS_275",
4979 "SYS_276",
4980 "SYS_277",
4981 "SYS_278",
4982 "SYS_279",
4983 "SYS_280",
4984 "SYS_getresuid", //281
4985 "SYS_setresuid", //282
4986 "SYS_getresgid", //283
4987 "SYS_setresgid", //284
4988 "SYS_285",
4989 "SYS_mquery", //286
4990 "SYS_closefrom", //287
4991 "SYS_sigaltstack", //288
4992 "SYS_shmget", //289
4993 "SYS_semop", //290
4994 "SYS_stat", //291
4995 "SYS_fstat", //292
4996 "SYS_lstat", //293
4997 "SYS_fhstat", //294
4998 "SYS___semctl", //295
4999 "SYS_shmctl", //296
5000 "SYS_msgctl", //297
5001 "SYS_MAXSYSCALL", //298
5002 //299
5003 //300
5004 };
5005 uint32_t uEAX;
5006 if (!LogIsEnabled())
5007 return;
5008 uEAX = CPUMGetGuestEAX(pVM);
5009 switch (uEAX)
5010 {
5011 default:
5012 if (uEAX < RT_ELEMENTS(apsz))
5013 {
5014 uint32_t au32Args[8] = {0};
5015 PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
5016 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5017 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5018 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5019 }
5020 else
5021 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
5022 break;
5023 }
5024}
5025
5026
5027#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5028/**
5029 * The Dll main entry point (stub).
5030 */
5031bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5032{
5033 return true;
5034}
5035
/**
 * Minimal byte-wise memcpy replacement for the no-CRT windows build.
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer; per the memcpy contract it must not
 *                  overlap dst.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* the old code dropped the const qualifier here */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5043
5044#endif
5045
/* Called by the recompiler when SMM-related state may have changed;
   intentionally a no-op in the VBox integration. */
void cpu_smm_update(CPUState* env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette