VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 4535

Last change on this file since 4535 was 4535, checked in by vboxsync, 17 years ago

Switched to reading and writing through PGM (like we already did for 64 bits hosts)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 148.3 KB
Line 
1/* $Id: VBoxRecompiler.c 4535 2007-09-05 14:20:59Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_REM
23#include "vl.h"
24#include "exec-all.h"
25
26#include <VBox/rem.h>
27#include <VBox/vmapi.h>
28#include <VBox/tm.h>
29#include <VBox/ssm.h>
30#include <VBox/em.h>
31#include <VBox/trpm.h>
32#include <VBox/iom.h>
33#include <VBox/mm.h>
34#include <VBox/pgm.h>
35#include <VBox/pdm.h>
36#include <VBox/dbgf.h>
37#include <VBox/dbg.h>
38#include <VBox/hwaccm.h>
39#include <VBox/patm.h>
40#include <VBox/csam.h>
41#include "REMInternal.h"
42#include <VBox/vm.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46#include <VBox/log.h>
47#include <iprt/semaphore.h>
48#include <iprt/asm.h>
49#include <iprt/assert.h>
50#include <iprt/thread.h>
51#include <iprt/string.h>
52
53/* Don't wanna include everything. */
54extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
55extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
56extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
57extern void tlb_flush_page(CPUX86State *env, uint32_t addr);
58extern void tlb_flush(CPUState *env, int flush_global);
59extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
60extern void sync_ldtr(CPUX86State *env1, int selector);
61extern int sync_tr(CPUX86State *env1, int selector);
62
63#ifdef VBOX_STRICT
64unsigned long get_phys_page_offset(target_ulong addr);
65#endif
66
67
68/*******************************************************************************
69* Defined Constants And Macros *
70*******************************************************************************/
71
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 *
 * Implemented as a structure assignment of the 80-bit FPU/MMX register
 * union so the compiler can expand it into a couple of moves inline.
 * Wrapped in do/while(0) so the macro behaves as a single statement.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
77
78
79/*******************************************************************************
80* Internal Functions *
81*******************************************************************************/
82static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
83static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
84static void remR3StateUpdate(PVM pVM);
85
86static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
87static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
88static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
89static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
90static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
91static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
92
93static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
94static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
96static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
97static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99
100
101/*******************************************************************************
102* Global Variables *
103*******************************************************************************/
104
/** @todo Move stats to REM::s some rainy day we have nothing do to. */
#ifdef VBOX_WITH_STATISTICS
/* Profiling of the major recompiler code paths (registered in REMR3Init). */
static STAMPROFILEADV gStatExecuteSingleInstr;
static STAMPROFILEADV gStatCompilationQEmu;
static STAMPROFILEADV gStatRunCodeQEmu;
static STAMPROFILEADV gStatTotalTimeQEmu;
static STAMPROFILEADV gStatTimers;
static STAMPROFILEADV gStatTBLookup;
static STAMPROFILEADV gStatIRQ;
static STAMPROFILEADV gStatRawCheck;
static STAMPROFILEADV gStatMemRead;
static STAMPROFILEADV gStatMemWrite;
/* Guest-physical <-> host-virtual address conversion profiling. */
static STAMPROFILE gStatGCPhys2HCVirt;
static STAMPROFILE gStatHCVirt2GCPhys;
static STAMCOUNTER gStatCpuGetTSC;
/* Reasons why raw-mode execution was refused. */
static STAMCOUNTER gStatRefuseTFInhibit;
static STAMCOUNTER gStatRefuseVM86;
static STAMCOUNTER gStatRefusePaging;
static STAMCOUNTER gStatRefusePAE;
static STAMCOUNTER gStatRefuseIOPLNot0;
static STAMCOUNTER gStatRefuseIF0;
static STAMCOUNTER gStatRefuseCode16;
static STAMCOUNTER gStatRefuseWP0;
static STAMCOUNTER gStatRefuseRing1or2;
static STAMCOUNTER gStatRefuseCanExecute;
/* Guest descriptor table and task register change counters. */
static STAMCOUNTER gStatREMGDTChange;
static STAMCOUNTER gStatREMIDTChange;
static STAMCOUNTER gStatREMLDTRChange;
static STAMCOUNTER gStatREMTRChange;
/* Per-selector out-of-sync counters; indexed ES,CS,SS,DS,FS,GS
 * (see the registration strings in REMR3Init). */
static STAMCOUNTER gStatSelOutOfSync[6];
static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
#endif
137
138/*
139 * Global stuff.
140 */
141
/** MMIO read callbacks.
 * Indexed by access size: 0 = byte, 1 = word, 2 = dword. */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks.
 * Indexed by access size: 0 = byte, 1 = word, 2 = dword. */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Handler read callbacks.
 * Indexed by access size: 0 = byte, 1 = word, 2 = dword. */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Handler write callbacks.
 * Indexed by access size: 0 = byte, 1 = word, 2 = dword. */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
173
174
/* Fixed: the guard previously tested RT_OS_WINDWS (typo, never defined), so it
 * could never exclude the Windows/AMD64 build and disagreed with the matching
 * DBGCRegisterCommands() guard in REMR3Init, which tests RT_OS_WINDOWS. */
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin, cTimesMax, enmCategory,        fFlags, pszName,  pszDescription */
    {  0,         ~0,        DBGCVAR_CAT_NUMBER, 0,      "on/off", "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors for the '.remstep' debugger command. */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
206
207
208/* Instantiate the structure signatures. */
209#define REM_STRUCT_OP 0
210#include "InnoTek/structs.h"
211
212
213
214/*******************************************************************************
215* Internal Functions *
216*******************************************************************************/
217static void remAbort(int rc, const char *pszTip);
218extern int testmath(void);
219
220/* Put them here to avoid unused variable warning. */
221AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
222#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
223AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
224#else
225AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
226#endif
227
228
229/**
230 * Initializes the REM.
231 *
232 * @returns VBox status code.
233 * @param pVM The VM to operate on.
234 */
235REMR3DECL(int) REMR3Init(PVM pVM)
236{
237 uint32_t u32Dummy;
238 unsigned i;
239
240 /*
241 * Assert sanity.
242 */
243 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
244 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
245 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
246#ifdef DEBUG
247 Assert(!testmath());
248#endif
249 ASSERT_STRUCT_TABLE(Misc);
250 ASSERT_STRUCT_TABLE(TLB);
251 ASSERT_STRUCT_TABLE(SegmentCache);
252 ASSERT_STRUCT_TABLE(XMMReg);
253 ASSERT_STRUCT_TABLE(MMXReg);
254 ASSERT_STRUCT_TABLE(float_status);
255 ASSERT_STRUCT_TABLE(float32u);
256 ASSERT_STRUCT_TABLE(float64u);
257 ASSERT_STRUCT_TABLE(floatx80u);
258 ASSERT_STRUCT_TABLE(CPUState);
259
260 /*
261 * Init some internal data members.
262 */
263 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
264 pVM->rem.s.Env.pVM = pVM;
265#ifdef CPU_RAW_MODE_INIT
266 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
267#endif
268
269 /* ctx. */
270 int rc = CPUMQueryGuestCtxPtr(pVM, &pVM->rem.s.pCtx);
271 if (VBOX_FAILURE(rc))
272 {
273 AssertMsgFailed(("Failed to obtain guest ctx pointer. rc=%Vrc\n", rc));
274 return rc;
275 }
276 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
277
278 /* ignore all notifications */
279 pVM->rem.s.fIgnoreAll = true;
280
281 /*
282 * Init the recompiler.
283 */
284 if (!cpu_x86_init(&pVM->rem.s.Env))
285 {
286 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
287 return VERR_GENERAL_FAILURE;
288 }
289 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
290 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
291
292 /* allocate code buffer for single instruction emulation. */
293 pVM->rem.s.Env.cbCodeBuffer = 4096;
294 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
295 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
296
297 /* finally, set the cpu_single_env global. */
298 cpu_single_env = &pVM->rem.s.Env;
299
300 /* Nothing is pending by default */
301 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
302
303 /*
304 * Register ram types.
305 */
306 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
307 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
308 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
309 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
310 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
311
312 /* stop ignoring. */
313 pVM->rem.s.fIgnoreAll = false;
314
315 /*
316 * Register the saved state data unit.
317 */
318 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
319 NULL, remR3Save, NULL,
320 NULL, remR3Load, NULL);
321 if (VBOX_FAILURE(rc))
322 return rc;
323
324#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
325 /*
326 * Debugger commands.
327 */
328 static bool fRegisteredCmds = false;
329 if (!fRegisteredCmds)
330 {
331 int rc = DBGCRegisterCommands(&g_aCmds[0], ELEMENTS(g_aCmds));
332 if (VBOX_SUCCESS(rc))
333 fRegisteredCmds = true;
334 }
335#endif
336
337#ifdef VBOX_WITH_STATISTICS
338 /*
339 * Statistics.
340 */
341 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
342 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
343 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
344 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
345 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
346 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
347 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
348 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
349 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
350 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
351 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
352 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
353
354 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
355
356 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
357 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
358 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
359 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
360 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
361 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
362 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
363 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
364 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
365 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
366
367 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
368 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
369 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
370 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
371
372 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
373 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
374 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
375 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
376 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
377 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
378
379 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
380 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
385
386
387#endif
388
389#ifdef DEBUG_ALL_LOGGING
390 loglevel = ~0;
391#endif
392
393 return rc;
394}
395
396
397/**
398 * Terminates the REM.
399 *
400 * Termination means cleaning up and freeing all resources,
401 * the VM it self is at this point powered off or suspended.
402 *
403 * @returns VBox status code.
404 * @param pVM The VM to operate on.
405 */
406REMR3DECL(int) REMR3Term(PVM pVM)
407{
408 return VINF_SUCCESS;
409}
410
411
412/**
413 * The VM is being reset.
414 *
415 * For the REM component this means to call the cpu_reset() and
416 * reinitialize some state variables.
417 *
418 * @param pVM VM handle.
419 */
420REMR3DECL(void) REMR3Reset(PVM pVM)
421{
422 /*
423 * Reset the REM cpu.
424 */
425 pVM->rem.s.fIgnoreAll = true;
426 cpu_reset(&pVM->rem.s.Env);
427 pVM->rem.s.cInvalidatedPages = 0;
428 pVM->rem.s.fIgnoreAll = false;
429}
430
431
432/**
433 * Execute state save operation.
434 *
435 * @returns VBox status code.
436 * @param pVM VM Handle.
437 * @param pSSM SSM operation handle.
438 */
439static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
440{
441 LogFlow(("remR3Save:\n"));
442
443 /*
444 * Save the required CPU Env bits.
445 * (Not much because we're never in REM when doing the save.)
446 */
447 PREM pRem = &pVM->rem.s;
448 Assert(!pRem->fInREM);
449 SSMR3PutU32(pSSM, pRem->Env.hflags);
450 SSMR3PutMem(pSSM, &pRem->Env, RT_OFFSETOF(CPUState, jmp_env));
451 SSMR3PutU32(pSSM, ~0); /* separator */
452
453 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
454 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
455
456 /*
457 * Save the REM stuff.
458 */
459 SSMR3PutUInt(pSSM, pRem->cInvalidatedPages);
460 unsigned i;
461 for (i = 0; i < pRem->cInvalidatedPages; i++)
462 SSMR3PutGCPtr(pSSM, pRem->aGCPtrInvalidatedPages[i]);
463
464 SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);
465
466 return SSMR3PutU32(pSSM, ~0); /* terminator */
467}
468
469
470/**
471 * Execute state load operation.
472 *
473 * @returns VBox status code.
474 * @param pVM VM Handle.
475 * @param pSSM SSM operation handle.
476 * @param u32Version Data layout version.
477 */
478static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
479{
480 uint32_t u32Dummy;
481 uint32_t fRawRing0 = false;
482 LogFlow(("remR3Load:\n"));
483
484 /*
485 * Validate version.
486 */
487 if (u32Version != REM_SAVED_STATE_VERSION)
488 {
489 Log(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
490 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
491 }
492
493 /*
494 * Do a reset to be on the safe side...
495 */
496 REMR3Reset(pVM);
497
498 /*
499 * Ignore all ignorable notifications.
500 * (Not doing this will cause serious trouble.)
501 */
502 pVM->rem.s.fIgnoreAll = true;
503
504 /*
505 * Load the required CPU Env bits.
506 * (Not much because we're never in REM when doing the save.)
507 */
508 PREM pRem = &pVM->rem.s;
509 Assert(!pRem->fInREM);
510 SSMR3GetU32(pSSM, &pRem->Env.hflags);
511 SSMR3GetMem(pSSM, &pRem->Env, RT_OFFSETOF(CPUState, jmp_env));
512 uint32_t u32Sep;
513 int rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
514 if (VBOX_FAILURE(rc))
515 return rc;
516 if (u32Sep != ~0)
517 {
518 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
519 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
520 }
521
522 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
523 SSMR3GetUInt(pSSM, &fRawRing0);
524 if (fRawRing0)
525 pRem->Env.state |= CPU_RAW_RING0;
526
527 /*
528 * Load the REM stuff.
529 */
530 rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
531 if (VBOX_FAILURE(rc))
532 return rc;
533 if (pRem->cInvalidatedPages > ELEMENTS(pRem->aGCPtrInvalidatedPages))
534 {
535 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
536 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
537 }
538 unsigned i;
539 for (i = 0; i < pRem->cInvalidatedPages; i++)
540 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
541
542 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
543 if (VBOX_FAILURE(rc))
544 return rc;
545
546 /* check the terminator. */
547 rc = SSMR3GetU32(pSSM, &u32Sep);
548 if (VBOX_FAILURE(rc))
549 return rc;
550 if (u32Sep != ~0)
551 {
552 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
553 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
554 }
555
556 /*
557 * Get the CPUID features.
558 */
559 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
560 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
561
562 /*
563 * Sync the Load Flush the TLB
564 */
565 tlb_flush(&pRem->Env, 1);
566
567#if 0 /** @todo r=bird: this doesn't make sense. WHY? */
568 /*
569 * Clear all lazy flags (only FPU sync for now).
570 */
571 CPUMGetAndClearFPUUsedREM(pVM);
572#endif
573
574 /*
575 * Stop ignoring ignornable notifications.
576 */
577 pVM->rem.s.fIgnoreAll = false;
578
579 return VINF_SUCCESS;
580}
581
582
583
584#undef LOG_GROUP
585#define LOG_GROUP LOG_GROUP_REM_RUN
586
587/**
588 * Single steps an instruction in recompiled mode.
589 *
590 * Before calling this function the REM state needs to be in sync with
591 * the VM. Call REMR3State() to perform the sync. It's only necessary
592 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
593 * and after calling REMR3StateBack().
594 *
595 * @returns VBox status code.
596 *
597 * @param pVM VM Handle.
598 */
599REMR3DECL(int) REMR3Step(PVM pVM)
600{
601 /*
602 * Lock the REM - we don't wanna have anyone interrupting us
603 * while stepping - and enabled single stepping. We also ignore
604 * pending interrupts and suchlike.
605 */
606 int interrupt_request = pVM->rem.s.Env.interrupt_request;
607 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
608 pVM->rem.s.Env.interrupt_request = 0;
609 cpu_single_step(&pVM->rem.s.Env, 1);
610
611 /*
612 * If we're standing at a breakpoint, that have to be disabled before we start stepping.
613 */
614 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
615 bool fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);
616
617 /*
618 * Execute and handle the return code.
619 * We execute without enabling the cpu tick, so on success we'll
620 * just flip it on and off to make sure it moves
621 */
622 int rc = cpu_exec(&pVM->rem.s.Env);
623 if (rc == EXCP_DEBUG)
624 {
625 TMCpuTickResume(pVM);
626 TMCpuTickPause(pVM);
627 TMVirtualResume(pVM);
628 TMVirtualPause(pVM);
629 rc = VINF_EM_DBG_STEPPED;
630 }
631 else
632 {
633 AssertMsgFailed(("Damn, this shouldn't happen! cpu_exec returned %d while singlestepping\n", rc));
634 switch (rc)
635 {
636 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
637 case EXCP_HLT:
638 case EXCP_HALTED: rc = VINF_EM_HALT; break;
639 case EXCP_RC:
640 rc = pVM->rem.s.rc;
641 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
642 break;
643 default:
644 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
645 rc = VERR_INTERNAL_ERROR;
646 break;
647 }
648 }
649
650 /*
651 * Restore the stuff we changed to prevent interruption.
652 * Unlock the REM.
653 */
654 if (fBp)
655 {
656 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
657 Assert(rc2 == 0); NOREF(rc2);
658 }
659 cpu_single_step(&pVM->rem.s.Env, 0);
660 pVM->rem.s.Env.interrupt_request = interrupt_request;
661
662 return rc;
663}
664
665
666/**
667 * Set a breakpoint using the REM facilities.
668 *
669 * @returns VBox status code.
670 * @param pVM The VM handle.
671 * @param Address The breakpoint address.
672 * @thread The emulation thread.
673 */
674REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
675{
676 VM_ASSERT_EMT(pVM);
677 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
678 {
679 LogFlow(("REMR3BreakpointSet: Address=%VGv\n", Address));
680 return VINF_SUCCESS;
681 }
682 LogFlow(("REMR3BreakpointSet: Address=%VGv - failed!\n", Address));
683 return VERR_REM_NO_MORE_BP_SLOTS;
684}
685
686
687/**
688 * Clears a breakpoint set by REMR3BreakpointSet().
689 *
690 * @returns VBox status code.
691 * @param pVM The VM handle.
692 * @param Address The breakpoint address.
693 * @thread The emulation thread.
694 */
695REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
696{
697 VM_ASSERT_EMT(pVM);
698 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
699 {
700 LogFlow(("REMR3BreakpointClear: Address=%VGv\n", Address));
701 return VINF_SUCCESS;
702 }
703 LogFlow(("REMR3BreakpointClear: Address=%VGv - not found!\n", Address));
704 return VERR_REM_BP_NOT_FOUND;
705}
706
707
708/**
709 * Emulate an instruction.
710 *
711 * This function executes one instruction without letting anyone
712 * interrupt it. This is intended for being called while being in
713 * raw mode and thus will take care of all the state syncing between
714 * REM and the rest.
715 *
716 * @returns VBox status code.
717 * @param pVM VM handle.
718 */
719REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
720{
721 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", pVM->rem.s.pCtx->cs, pVM->rem.s.pCtx->eip));
722
723 /*
724 * Sync the state and enable single instruction / single stepping.
725 */
726 int rc = REMR3State(pVM);
727 if (VBOX_SUCCESS(rc))
728 {
729 int interrupt_request = pVM->rem.s.Env.interrupt_request;
730 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
731 Assert(!pVM->rem.s.Env.singlestep_enabled);
732#if 1
733
734 /*
735 * Now we set the execute single instruction flag and enter the cpu_exec loop.
736 */
737 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
738 rc = cpu_exec(&pVM->rem.s.Env);
739 switch (rc)
740 {
741 /*
742 * Executed without anything out of the way happening.
743 */
744 case EXCP_SINGLE_INSTR:
745 rc = VINF_EM_RESCHEDULE;
746 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
747 break;
748
749 /*
750 * If we take a trap or start servicing a pending interrupt, we might end up here.
751 * (Timer thread or some other thread wishing EMT's attention.)
752 */
753 case EXCP_INTERRUPT:
754 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
755 rc = VINF_EM_RESCHEDULE;
756 break;
757
758 /*
759 * Single step, we assume!
760 * If there was a breakpoint there we're fucked now.
761 */
762 case EXCP_DEBUG:
763 {
764 /* breakpoint or single step? */
765 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
766 int iBP;
767 rc = VINF_EM_DBG_STEPPED;
768 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
769 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
770 {
771 rc = VINF_EM_DBG_BREAKPOINT;
772 break;
773 }
774 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Vrc iBP=%d GCPtrPC=%VGv\n", rc, iBP, GCPtrPC));
775 break;
776 }
777
778 /*
779 * hlt instruction.
780 */
781 case EXCP_HLT:
782 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
783 rc = VINF_EM_HALT;
784 break;
785
786 /*
787 * The VM has halted.
788 */
789 case EXCP_HALTED:
790 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
791 rc = VINF_EM_HALT;
792 break;
793
794 /*
795 * Switch to RAW-mode.
796 */
797 case EXCP_EXECUTE_RAW:
798 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
799 rc = VINF_EM_RESCHEDULE_RAW;
800 break;
801
802 /*
803 * Switch to hardware accelerated RAW-mode.
804 */
805 case EXCP_EXECUTE_HWACC:
806 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
807 rc = VINF_EM_RESCHEDULE_HWACC;
808 break;
809
810 /*
811 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
812 */
813 case EXCP_RC:
814 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
815 rc = pVM->rem.s.rc;
816 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
817 break;
818
819 /*
820 * Figure out the rest when they arrive....
821 */
822 default:
823 AssertMsgFailed(("rc=%d\n", rc));
824 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
825 rc = VINF_EM_RESCHEDULE;
826 break;
827 }
828
829 /*
830 * Switch back the state.
831 */
832#else
833 pVM->rem.s.Env.interrupt_request = 0;
834 cpu_single_step(&pVM->rem.s.Env, 1);
835
836 /*
837 * Execute and handle the return code.
838 * We execute without enabling the cpu tick, so on success we'll
839 * just flip it on and off to make sure it moves.
840 *
841 * (We do not use emulate_single_instr() because that doesn't enter the
842 * right way in will cause serious trouble if a longjmp was attempted.)
843 */
844# ifdef DEBUG_bird
845 remR3DisasInstr(&pVM->rem.s.Env, 1, "REMR3EmulateInstruction");
846# endif
847 int cTimesMax = 16384;
848 uint32_t eip = pVM->rem.s.Env.eip;
849 do
850 {
851 rc = cpu_exec(&pVM->rem.s.Env);
852
853 } while ( eip == pVM->rem.s.Env.eip
854 && (rc == EXCP_DEBUG || rc == EXCP_EXECUTE_RAW)
855 && --cTimesMax > 0);
856 switch (rc)
857 {
858 /*
859 * Single step, we assume!
860 * If there was a breakpoint there we're fucked now.
861 */
862 case EXCP_DEBUG:
863 {
864 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG\n"));
865 rc = VINF_EM_RESCHEDULE;
866 break;
867 }
868
869 /*
870 * We cannot be interrupted!
871 */
872 case EXCP_INTERRUPT:
873 AssertMsgFailed(("Shouldn't happen! Everything was locked!\n"));
874 rc = VERR_INTERNAL_ERROR;
875 break;
876
877 /*
878 * hlt instruction.
879 */
880 case EXCP_HLT:
881 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
882 rc = VINF_EM_HALT;
883 break;
884
885 /*
886 * The VM has halted.
887 */
888 case EXCP_HALTED:
889 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
890 rc = VINF_EM_HALT;
891 break;
892
893 /*
894 * Switch to RAW-mode.
895 */
896 case EXCP_EXECUTE_RAW:
897 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
898 rc = VINF_EM_RESCHEDULE_RAW;
899 break;
900
901 /*
902 * Switch to hardware accelerated RAW-mode.
903 */
904 case EXCP_EXECUTE_HWACC:
905 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
906 rc = VINF_EM_RESCHEDULE_HWACC;
907 break;
908
909 /*
910 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
911 */
912 case EXCP_RC:
913 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC rc=%Vrc\n", pVM->rem.s.rc));
914 rc = pVM->rem.s.rc;
915 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
916 break;
917
918 /*
919 * Figure out the rest when they arrive....
920 */
921 default:
922 AssertMsgFailed(("rc=%d\n", rc));
923 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
924 rc = VINF_SUCCESS;
925 break;
926 }
927
928 /*
929 * Switch back the state.
930 */
931 cpu_single_step(&pVM->rem.s.Env, 0);
932#endif
933 pVM->rem.s.Env.interrupt_request = interrupt_request;
934 int rc2 = REMR3StateBack(pVM);
935 AssertRC(rc2);
936 }
937
938 Log2(("REMR3EmulateInstruction: returns %Vrc (cs:eip=%04x:%08x)\n",
939 rc, pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip));
940 return rc;
941}
942
943
/**
 * Runs code in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code mapped from the cpu_exec() exit code:
 *          VINF_SUCCESS, VINF_EM_HALT, VINF_EM_DBG_STEPPED/BREAKPOINT,
 *          VINF_EM_RESCHEDULE_RAW/HWACC, or the rc raised via EXCP_RC.
 *
 * @param pVM VM Handle.
 */
REMR3DECL(int) REMR3Run(PVM pVM)
{
    Log2(("REMR3Run: (cs:eip=%04x:%08x)\n", pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip));
    Assert(pVM->rem.s.fInREM);
/* Commented-out debugging aids: force single stepping around specific guest code. */
////Keyboard / tb stuff:
//if (    pVM->rem.s.Env.segs[R_CS].selector == 0xf000
//    &&  pVM->rem.s.Env.eip >= 0xe860
//    &&  pVM->rem.s.Env.eip <= 0xe880)
//    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
////A20:
//if (    pVM->rem.s.Env.segs[R_CS].selector == 0x9020
//    &&  pVM->rem.s.Env.eip >= 0x970
//    &&  pVM->rem.s.Env.eip <= 0x9a0)
//    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
////Speaker (port 61h)
//if (    pVM->rem.s.Env.segs[R_CS].selector == 0x0010
//    &&  (    (pVM->rem.s.Env.eip >= 0x90278c10 && pVM->rem.s.Env.eip <= 0x90278c30)
//         ||  (pVM->rem.s.Env.eip >= 0x9010e250 && pVM->rem.s.Env.eip <= 0x9010e260)
//        )
//    )
//    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
//DBGFR3InfoLog(pVM, "timers", NULL);


    /* Run the recompiler and translate its exit code into a VBox status code. */
    int rc = cpu_exec(&pVM->rem.s.Env);
    switch (rc)
    {
        /*
         * This happens when the execution was interrupted
         * by an external event, like pending timers.
         */
        case EXCP_INTERRUPT:
            Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
            rc = VINF_SUCCESS;
            break;

        /*
         * hlt instruction.
         */
        case EXCP_HLT:
            Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * The VM has halted.
         */
        case EXCP_HALTED:
            Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
            rc = VINF_EM_HALT;
            break;

        /*
         * Breakpoint/single step.
         */
        case EXCP_DEBUG:
        {
#if 0//def DEBUG_bird
            static int iBP = 0;
            printf("howdy, breakpoint! iBP=%d\n", iBP);
            switch (iBP)
            {
                case 0:
                    cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
                    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
                    //pVM->rem.s.Env.interrupt_request = 0;
                    //pVM->rem.s.Env.exception_index = -1;
                    //g_fInterruptDisabled = 1;
                    rc = VINF_SUCCESS;
                    asm("int3");
                    break;
                default:
                    asm("int3");
                    break;
            }
            iBP++;
#else
            /* Breakpoint or single step? Scan the registered breakpoints for the
               linear PC; if none matches, this EXCP_DEBUG was a single step. */
            RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
            int iBP;
            rc = VINF_EM_DBG_STEPPED;
            for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
                if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
                {
                    rc = VINF_EM_DBG_BREAKPOINT;
                    break;
                }
            Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Vrc iBP=%d GCPtrPC=%VGv\n", rc, iBP, GCPtrPC));
#endif
            break;
        }

        /*
         * Switch to RAW-mode.
         */
        case EXCP_EXECUTE_RAW:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
            rc = VINF_EM_RESCHEDULE_RAW;
            break;

        /*
         * Switch to hardware accelerated RAW-mode.
         */
        case EXCP_EXECUTE_HWACC:
            Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
            rc = VINF_EM_RESCHEDULE_HWACC;
            break;

        /*
         * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
         * The stashed rc is consumed and reset so it cannot be returned twice.
         */
        case EXCP_RC:
            Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Vrc\n", pVM->rem.s.rc));
            rc = pVM->rem.s.rc;
            pVM->rem.s.rc = VERR_INTERNAL_ERROR;
            break;

        /*
         * Figure out the rest when they arrive....
         */
        default:
            AssertMsgFailed(("rc=%d\n", rc));
            Log2(("REMR3Run: cpu_exec -> %d\n", rc));
            rc = VINF_SUCCESS;
            break;
    }

    Log2(("REMR3Run: returns %Vrc (cs:eip=%04x:%08x)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, pVM->rem.s.Env.eip));
    return rc;
}
1086
1087
/**
 * Check if the cpu state is suitable for Raw execution.
 *
 * @returns boolean
 * @param env The CPU env struct.
 * @param eip The EIP to check this for (might differ from env->eip).
 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
 * @param pExceptionIndex Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
 *
 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
 */
bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, uint32_t *pExceptionIndex)
{
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
    /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */

    /* Update counter. (Also used below to refuse the very first scheduling.) */
    env->pVM->rem.s.cCanExecuteRaw++;

    if (HWACCMIsEnabled(env->pVM))
    {
        env->state |= CPU_RAW_HWACC;

        /*
         * Create partial context for HWACCMR3CanExecuteGuest.
         * Only cr0/3/4, tr, idtr, eflags and cs/ss are filled in; the callee
         * must not rely on any other field.
         */
        CPUMCTX Ctx;
        Ctx.cr0 = env->cr[0];
        Ctx.cr3 = env->cr[3];
        Ctx.cr4 = env->cr[4];

        Ctx.tr = env->tr.selector;
        Ctx.trHid.u32Base = (uint32_t)env->tr.base;
        Ctx.trHid.u32Limit = env->tr.limit;
        Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;

        Ctx.idtr.cbIdt = env->idt.limit;
        Ctx.idtr.pIdt = (uint32_t)env->idt.base;

        Ctx.eflags.u32 = env->eflags;

        Ctx.cs = env->segs[R_CS].selector;
        Ctx.csHid.u32Base = (uint32_t)env->segs[R_CS].base;
        Ctx.csHid.u32Limit = env->segs[R_CS].limit;
        Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;

        Ctx.ss = env->segs[R_SS].selector;
        Ctx.ssHid.u32Base = (uint32_t)env->segs[R_SS].base;
        Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
        Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;

        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
        {
            *pExceptionIndex = EXCP_EXECUTE_HWACC;
            return true;
        }
        return false;
    }

    /*
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelyhood of being true during normal execution.
     */
    if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
    {
        STAM_COUNTER_INC(&gStatRefuseTFInhibit);
        Log2(("raw mode refused: fFlags=%#x\n", fFlags));
        return false;
    }

#ifndef VBOX_RAW_V86
    if (fFlags & VM_MASK) {
        STAM_COUNTER_INC(&gStatRefuseVM86);
        Log2(("raw mode refused: VM_MASK\n"));
        return false;
    }
#endif

    if (env->state & CPU_EMULATE_SINGLE_INSTR)
    {
#ifndef DEBUG_bird
        Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
#endif
        return false;
    }

    if (env->singlestep_enabled)
    {
        //Log2(("raw mode refused: Single step\n"));
        return false;
    }

    if (env->nb_breakpoints > 0)
    {
        //Log2(("raw mode refused: Breakpoints\n"));
        return false;
    }

    /* Raw mode requires protected mode with paging. */
    uint32_t u32CR0 = env->cr[0];
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        STAM_COUNTER_INC(&gStatRefusePaging);
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return false;
    }

    if (env->cr[4] & CR4_PAE_MASK)
    {
        STAM_COUNTER_INC(&gStatRefusePAE);
        //Log2(("raw mode refused: PAE\n"));
        return false;
    }

    if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
    {
        /* Ring 3 candidate. */
        if (!EMIsRawRing3Enabled(env->pVM))
            return false;

        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            Log2(("raw mode refused: IF (RawR3)\n"));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return false;
        }
    }
    else
    {
        /* Ring 0 candidate. */
        if (!EMIsRawRing0Enabled(env->pVM))
            return false;

        // Let's start with pure 32 bits ring 0 code first
        if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseCode16);
            Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
            return false;
        }

        // Only R0
        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
        {
            STAM_COUNTER_INC(&gStatRefuseRing1or2);
            Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
            return false;
        }

        if (!(u32CR0 & CR0_WP_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseWP0);
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return false;
        }

        /* Patch code is always run in raw mode, bypassing the remaining checks. */
        if (PATMIsPatchGCAddr(env->pVM, eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
            *pExceptionIndex = EXCP_EXECUTE_RAW;
            return true;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(env->eflags & IF_MASK))
        {
            STAM_COUNTER_INC(&gStatRefuseIF0);
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return false;
        }
#endif

        env->state |= CPU_RAW_RING0;
    }

    /*
     * Don't reschedule the first time we're called, because there might be
     * special reasons why we're here that is not covered by the above checks.
     */
    if (env->pVM->rem.s.cCanExecuteRaw == 1)
    {
        Log2(("raw mode refused: first scheduling\n"));
        STAM_COUNTER_INC(&gStatRefuseCanExecute);
        return false;
    }

    Assert(PGMPhysIsA20Enabled(env->pVM));
    *pExceptionIndex = EXCP_EXECUTE_RAW;
    return true;
}
1290
1291
1292/**
1293 * Fetches a code byte.
1294 *
1295 * @returns Success indicator (bool) for ease of use.
1296 * @param env The CPU environment structure.
1297 * @param GCPtrInstr Where to fetch code.
1298 * @param pu8Byte Where to store the byte on success
1299 */
1300bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1301{
1302 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1303 if (VBOX_SUCCESS(rc))
1304 return true;
1305 return false;
1306}
1307
1308
1309/**
1310 * Flush (or invalidate if you like) page table/dir entry.
1311 *
1312 * (invlpg instruction; tlb_flush_page)
1313 *
1314 * @param env Pointer to cpu environment.
1315 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1316 */
1317void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1318{
1319 PVM pVM = env->pVM;
1320
1321 /*
1322 * When we're replaying invlpg instructions or restoring a saved
1323 * state we disable this path.
1324 */
1325 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1326 return;
1327 Log(("remR3FlushPage: GCPtr=%VGv\n", GCPtr));
1328 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1329
1330 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1331
1332 /*
1333 * Update the control registers before calling PGMFlushPage.
1334 */
1335 PCPUMCTX pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1336 pCtx->cr0 = env->cr[0];
1337 pCtx->cr3 = env->cr[3];
1338 pCtx->cr4 = env->cr[4];
1339
1340 /*
1341 * Let PGM do the rest.
1342 */
1343 int rc = PGMInvalidatePage(pVM, GCPtr);
1344 if (VBOX_FAILURE(rc))
1345 {
1346 AssertMsgFailed(("remR3FlushPage %x %x %x %d failed!!\n", GCPtr));
1347 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1348 }
1349 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1350}
1351
1352/**
1353 * Called from tlb_protect_code in order to write monitor a code page.
1354 *
1355 * @param env Pointer to the CPU environment.
1356 * @param GCPtr Code page to monitor
1357 */
1358void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1359{
1360 Assert(env->pVM->rem.s.fInREM);
1361 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1362 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1363 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1364 && !(env->eflags & VM_MASK) /* no V86 mode */
1365 && !HWACCMIsEnabled(env->pVM))
1366 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1367}
1368
1369/**
1370 * Called when the CPU is initialized, any of the CRx registers are changed or
1371 * when the A20 line is modified.
1372 *
1373 * @param env Pointer to the CPU environment.
1374 * @param fGlobal Set if the flush is global.
1375 */
1376void remR3FlushTLB(CPUState *env, bool fGlobal)
1377{
1378 PVM pVM = env->pVM;
1379
1380 /*
1381 * When we're replaying invlpg instructions or restoring a saved
1382 * state we disable this path.
1383 */
1384 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
1385 return;
1386 Assert(pVM->rem.s.fInREM);
1387
1388 /*
1389 * The caller doesn't check cr4, so we have to do that for ourselves.
1390 */
1391 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1392 fGlobal = true;
1393 Log(("remR3FlushTLB: CR0=%VGp CR3=%VGp CR4=%VGp %s\n", env->cr[0], env->cr[3], env->cr[4], fGlobal ? " global" : ""));
1394
1395 /*
1396 * Update the control registers before calling PGMR3FlushTLB.
1397 */
1398 PCPUMCTX pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1399 pCtx->cr0 = env->cr[0];
1400 pCtx->cr3 = env->cr[3];
1401 pCtx->cr4 = env->cr[4];
1402
1403 /*
1404 * Let PGM do the rest.
1405 */
1406 PGMFlushTLB(pVM, env->cr[3], fGlobal);
1407}
1408
1409
1410/**
1411 * Called when any of the cr0, cr4 or efer registers is updated.
1412 *
1413 * @param env Pointer to the CPU environment.
1414 */
1415void remR3ChangeCpuMode(CPUState *env)
1416{
1417 int rc;
1418 PVM pVM = env->pVM;
1419
1420 /*
1421 * When we're replaying loads or restoring a saved
1422 * state this path is disabled.
1423 */
1424 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
1425 return;
1426 Assert(pVM->rem.s.fInREM);
1427
1428 /*
1429 * Update the control registers before calling PGMR3ChangeMode()
1430 * as it may need to map whatever cr3 is pointing to.
1431 */
1432 PCPUMCTX pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1433 pCtx->cr0 = env->cr[0];
1434 pCtx->cr3 = env->cr[3];
1435 pCtx->cr4 = env->cr[4];
1436
1437#ifdef TARGET_X86_64
1438 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
1439 if (rc != VINF_SUCCESS)
1440 cpu_abort(env, "PGMChangeMode(, %08x, %08x, %016llx) -> %Vrc\n", env->cr[0], env->cr[4], env->efer, rc);
1441#else
1442 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
1443 if (rc != VINF_SUCCESS)
1444 cpu_abort(env, "PGMChangeMode(, %08x, %08x, %016llx) -> %Vrc\n", env->cr[0], env->cr[4], 0LL, rc);
1445#endif
1446}
1447
1448
1449/**
1450 * Called from compiled code to run dma.
1451 *
1452 * @param env Pointer to the CPU environment.
1453 */
1454void remR3DmaRun(CPUState *env)
1455{
1456 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1457 PDMR3DmaRun(env->pVM);
1458 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1459}
1460
1461/**
1462 * Called from compiled code to schedule pending timers in VMM
1463 *
1464 * @param env Pointer to the CPU environment.
1465 */
1466void remR3TimersRun(CPUState *env)
1467{
1468 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1469 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1470 TMR3TimerQueuesDo(env->pVM);
1471 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1472 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1473}
1474
/**
 * Record trap occurrence, detecting trap storms.
 *
 * @returns VBox status code: VINF_SUCCESS normally, or
 *          VERR_REM_TOO_MANY_TRAPS when the same exception has repeated
 *          too many times (the rc is also raised via remR3RaiseRC).
 * @param env Pointer to the CPU environment.
 * @param uTrap Trap nr
 * @param uErrorCode Error code
 * @param pvNextEIP Next EIP
 */
int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, uint32_t pvNextEIP)
{
    PVM pVM = (PVM)env->pVM;
#ifdef VBOX_WITH_STATISTICS
    /* Per-trap counters, registered lazily; static so they persist across calls. */
    static STAMCOUNTER aStatTrap[255];
    static bool aRegisters[ELEMENTS(aStatTrap)];
#endif

#ifdef VBOX_WITH_STATISTICS
    if (uTrap < 255)
    {
        if (!aRegisters[uTrap])
        {
            aRegisters[uTrap] = true;
            char szStatName[64];
            RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
            STAM_REG(env->pVM, &aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
        }
        STAM_COUNTER_INC(&aStatTrap[uTrap]);
    }
#endif
    Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%VGv eip=%VGv cr2=%08x\n", uTrap, uErrorCode, pvNextEIP, env->eip, env->cr[2]));
    /* Only hardware exceptions (< 0x20) in protected, non-V86 mode participate
       in trap-storm detection. */
    if( uTrap < 0x20
        && (env->cr[0] & X86_CR0_PE)
        && !(env->eflags & X86_EFL_VM))
    {
#ifdef DEBUG
        remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
#endif
        /* Same exception repeating more than 512 times -> give up.
           (Note the counter increment is a side effect of this condition.) */
        if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
        {
            LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%VGv eip=%VGv cr2=%08x\n", uTrap, uErrorCode, pvNextEIP, env->eip, env->cr[2]));
            remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
            return VERR_REM_TOO_MANY_TRAPS;
        }
        /* A different trap or a different location restarts the repeat count. */
        if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
            pVM->rem.s.cPendingExceptions = 1;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    else
    {
        /* Other traps: record but never treat as a storm. */
        pVM->rem.s.cPendingExceptions = 0;
        pVM->rem.s.uPendingException = uTrap;
        pVM->rem.s.uPendingExcptEIP = env->eip;
        pVM->rem.s.uPendingExcptCR2 = env->cr[2];
    }
    return VINF_SUCCESS;
}
1534
1535/*
1536 * Clear current active trap
1537 *
1538 * @param pVM VM Handle.
1539 */
1540void remR3TrapClear(PVM pVM)
1541{
1542 pVM->rem.s.cPendingExceptions = 0;
1543 pVM->rem.s.uPendingException = 0;
1544 pVM->rem.s.uPendingExcptEIP = 0;
1545 pVM->rem.s.uPendingExcptCR2 = 0;
1546}
1547
1548
/**
 * Syncs the internal REM state with the VM.
 *
 * This must be called before REMR3Run() is invoked whenever when the REM
 * state is not up to date. Calling it several times in a row is not
 * permitted.
 *
 * @returns VBox status code.
 *
 * @param pVM VM Handle.
 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 * no do this since the majority of the callers don't want any unnecessary of events
 * pending that would immediatly interrupt execution.
 */
REMR3DECL(int) REMR3State(PVM pVM)
{
    Log2(("REMR3State:\n"));
    STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
    register const CPUMCTX *pCtx = pVM->rem.s.pCtx;
    register unsigned fFlags;
    bool fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);

    Assert(!pVM->rem.s.fInREM);
    pVM->rem.s.fInStateSync = true;

    /*
     * Copy the registers which requires no special handling.
     */
    Assert(R_EAX == 0);
    pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
    Assert(R_ECX == 1);
    pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
    Assert(R_EDX == 2);
    pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
    Assert(R_EBX == 3);
    pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
    Assert(R_ESP == 4);
    pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
    Assert(R_EBP == 5);
    pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
    Assert(R_ESI == 6);
    pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
    Assert(R_EDI == 7);
    pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
    pVM->rem.s.Env.eip = pCtx->eip;

    pVM->rem.s.Env.eflags = pCtx->eflags.u32;

    pVM->rem.s.Env.cr[2] = pCtx->cr2;

    /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
    pVM->rem.s.Env.dr[0] = pCtx->dr0;
    pVM->rem.s.Env.dr[1] = pCtx->dr1;
    pVM->rem.s.Env.dr[2] = pCtx->dr2;
    pVM->rem.s.Env.dr[3] = pCtx->dr3;
    pVM->rem.s.Env.dr[4] = pCtx->dr4;
    pVM->rem.s.Env.dr[5] = pCtx->dr5;
    pVM->rem.s.Env.dr[6] = pCtx->dr6;
    pVM->rem.s.Env.dr[7] = pCtx->dr7;

    /*
     * Clear the halted hidden flag (the interrupt waking up the CPU can
     * have been dispatched in raw mode).
     */
    pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;

    /*
     * Replay invlpg?
     * (fIgnoreInvlPg stops remR3FlushPage from echoing these back to PGM.)
     */
    if (pVM->rem.s.cInvalidatedPages)
    {
        pVM->rem.s.fIgnoreInvlPg = true;
        RTUINT i;
        for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
        {
            Log2(("REMR3State: invlpg %VGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
            tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
        }
        pVM->rem.s.fIgnoreInvlPg = false;
        pVM->rem.s.cInvalidatedPages = 0;
    }

    /*
     * Registers which are rarely changed and require special handling / order when changed.
     */
    fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
    if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
        | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR | CPUM_CHANGED_TR
        | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR))
    {
        if (fFlags & CPUM_CHANGED_FPU_REM)
            save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */

        if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            tlb_flush(&pVM->rem.s.Env, true);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        /* The fIgnore* flags keep the remR3* callbacks triggered by the
           cpu_x86_update_cr* calls from re-entering PGM. */
        if (fFlags & CPUM_CHANGED_CR4)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR0)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            pVM->rem.s.fIgnoreCpuMode = true;
            cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
            pVM->rem.s.fIgnoreCpuMode = false;
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_CR3)
        {
            pVM->rem.s.fIgnoreCR3Load = true;
            cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
            pVM->rem.s.fIgnoreCR3Load = false;
        }

        if (fFlags & CPUM_CHANGED_GDTR)
        {
            pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
            pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
        }

        if (fFlags & CPUM_CHANGED_IDTR)
        {
            pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
            pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
        }

        if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
        {
            pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
            pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
            pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
        }

        if (fFlags & CPUM_CHANGED_LDTR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
                pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u32Base;
                pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
                pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;;
            }
            else
                sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
        }

        if (fFlags & CPUM_CHANGED_TR)
        {
            if (fHiddenSelRegsValid)
            {
                pVM->rem.s.Env.tr.selector = pCtx->tr;
                pVM->rem.s.Env.tr.base = pCtx->trHid.u32Base;
                pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
                pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;;
            }
            else
                sync_tr(&pVM->rem.s.Env, pCtx->tr);

            /** @note do_interrupt will fault if the busy flag is still set.... */
            pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
        }
    }

    /*
     * Update selector registers.
     * This must be done *after* we've synced gdt, ldt and crX registers
     * since we're reading the GDT/LDT om sync_seg. This will happen with
     * saved state which takes a quick dip into rawmode for instance.
     */
    /*
     * Stack; Note first check this one as the CPL might have changed. The
     * wrong CPL can cause QEmu to raise an exception in sync_seg!!
     */

    if (fHiddenSelRegsValid)
    {
        /* The hidden selector registers are valid in the CPU context. */
        /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */

        /* Set current CPL */
        cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));

        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u32Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u32Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u32Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u32Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u32Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
        cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u32Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
    }
    else
    {
        /* In 'normal' raw mode we don't have access to the hidden selector registers. */
        if (pVM->rem.s.Env.segs[R_SS].selector != (uint16_t)pCtx->ss)
        {
            Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));

            cpu_x86_set_cpl(&pVM->rem.s.Env, (pCtx->eflags.Bits.u1VM) ? 3 : (pCtx->ss & 3));
            sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_SS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_SS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
        {
            Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
            sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_ES].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_ES].newselector = 0;

        if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
        {
            Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
            sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_CS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_CS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
        {
            Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
            sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_DS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_DS].newselector = 0;

        /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
         * be the same but not the base/limit. */
        if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
        {
            Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
            sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_FS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_FS].newselector = 0;

        if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
        {
            Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
            sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
#ifdef VBOX_WITH_STATISTICS
            if (pVM->rem.s.Env.segs[R_GS].newselector)
            {
                STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
            }
#endif
        }
        else
            pVM->rem.s.Env.segs[R_GS].newselector = 0;
    }

    /*
     * Check for traps.
     */
    pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
    TRPMEVENT enmType;
    uint8_t u8TrapNo;
    int rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
    if (VBOX_SUCCESS(rc))
    {
        #ifdef DEBUG
        if (u8TrapNo == 0x80)
        {
            remR3DumpLnxSyscall(pVM);
            remR3DumpOBsdSyscall(pVM);
        }
        #endif

        pVM->rem.s.Env.exception_index = u8TrapNo;
        if (enmType != TRPM_SOFTWARE_INT)
        {
            pVM->rem.s.Env.exception_is_int = 0;
            pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
        }
        else
        {
            /*
             * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
             * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
             * for int03 and into.
             */
            pVM->rem.s.Env.exception_is_int = 1;
            pVM->rem.s.Env.exception_next_eip = pCtx->eip + 2;
            /* int 3 may be generated by one-byte 0xcc */
            if (u8TrapNo == 3)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->eip) == 0xcc)
                    pVM->rem.s.Env.exception_next_eip = pCtx->eip + 1;
            }
            /* int 4 may be generated by one-byte 0xce */
            else if (u8TrapNo == 4)
            {
                if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->eip) == 0xce)
                    pVM->rem.s.Env.exception_next_eip = pCtx->eip + 1;
            }
        }

        /* get error code and cr2 if needed. */
        switch (u8TrapNo)
        {
            case 0x0e:
                pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
                pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
                break;

            case 0x11: case 0x08:
            default:
                pVM->rem.s.Env.error_code = 0;
                break;
        }

        /*
         * We can now reset the active trap since the recompiler is gonna have a go at it.
         */
        rc = TRPMResetTrap(pVM);
        AssertRC(rc);
        Log2(("REMR3State: trap=%02x errcd=%VGv cr2=%VGv nexteip=%VGv%s\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.error_code,
            pVM->rem.s.Env.cr[2], pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
    }

    /*
     * Clear old interrupt request flags; Check for pending hardware interrupts.
     * (See @remark for why we don't check for other FFs.)
     */
    pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
    if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
        || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
        pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;

    /*
     * We're now in REM mode.
     */
    pVM->rem.s.fInREM = true;
    pVM->rem.s.fInStateSync = false;
    pVM->rem.s.cCanExecuteRaw = 0;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
    Log2(("REMR3State: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
1932
1933
1934/**
1935 * Syncs back changes in the REM state to the the VM state.
1936 *
1937 * This must be called after invoking REMR3Run().
1938 * Calling it several times in a row is not permitted.
1939 *
1940 * @returns VBox status code.
1941 *
1942 * @param pVM VM Handle.
1943 */
1944REMR3DECL(int) REMR3StateBack(PVM pVM)
1945{
1946 Log2(("REMR3StateBack:\n"));
1947 Assert(pVM->rem.s.fInREM);
1948 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
1949 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
1950
1951 /*
1952 * Copy back the registers.
1953 * This is done in the order they are declared in the CPUMCTX structure.
1954 */
1955
1956 /** @todo FOP */
1957 /** @todo FPUIP */
1958 /** @todo CS */
1959 /** @todo FPUDP */
1960 /** @todo DS */
1961 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
1962 pCtx->fpu.MXCSR = 0;
1963 pCtx->fpu.MXCSR_MASK = 0;
1964
1965 /** @todo check if FPU/XMM was actually used in the recompiler */
1966 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
1967//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
1968
1969 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
1970 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
1971 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
1972 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
1973 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
1974 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
1975 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
1976
1977 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
1978 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
1979
1980#ifdef VBOX_WITH_STATISTICS
1981 if (pVM->rem.s.Env.segs[R_SS].newselector)
1982 {
1983 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
1984 }
1985 if (pVM->rem.s.Env.segs[R_GS].newselector)
1986 {
1987 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
1988 }
1989 if (pVM->rem.s.Env.segs[R_FS].newselector)
1990 {
1991 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
1992 }
1993 if (pVM->rem.s.Env.segs[R_ES].newselector)
1994 {
1995 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
1996 }
1997 if (pVM->rem.s.Env.segs[R_DS].newselector)
1998 {
1999 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2000 }
2001 if (pVM->rem.s.Env.segs[R_CS].newselector)
2002 {
2003 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2004 }
2005#endif
2006 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2007 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2008 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2009 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2010 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2011
2012 pCtx->eip = pVM->rem.s.Env.eip;
2013 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2014
2015 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2016 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2017 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2018 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2019
2020 pCtx->dr0 = pVM->rem.s.Env.dr[0];
2021 pCtx->dr1 = pVM->rem.s.Env.dr[1];
2022 pCtx->dr2 = pVM->rem.s.Env.dr[2];
2023 pCtx->dr3 = pVM->rem.s.Env.dr[3];
2024 pCtx->dr4 = pVM->rem.s.Env.dr[4];
2025 pCtx->dr5 = pVM->rem.s.Env.dr[5];
2026 pCtx->dr6 = pVM->rem.s.Env.dr[6];
2027 pCtx->dr7 = pVM->rem.s.Env.dr[7];
2028
2029 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2030 if (pCtx->gdtr.pGdt != (uint32_t)pVM->rem.s.Env.gdt.base)
2031 {
2032 pCtx->gdtr.pGdt = (uint32_t)pVM->rem.s.Env.gdt.base;
2033 STAM_COUNTER_INC(&gStatREMGDTChange);
2034 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2035 }
2036
2037 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2038 if (pCtx->idtr.pIdt != (uint32_t)pVM->rem.s.Env.idt.base)
2039 {
2040 pCtx->idtr.pIdt = (uint32_t)pVM->rem.s.Env.idt.base;
2041 STAM_COUNTER_INC(&gStatREMIDTChange);
2042 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2043 }
2044
2045 if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
2046 {
2047 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2048 STAM_COUNTER_INC(&gStatREMLDTRChange);
2049 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2050 }
2051 if (pCtx->tr != pVM->rem.s.Env.tr.selector)
2052 {
2053 pCtx->tr = pVM->rem.s.Env.tr.selector;
2054 STAM_COUNTER_INC(&gStatREMTRChange);
2055 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2056 }
2057
2058 /** @todo These values could still be out of sync! */
2059 pCtx->csHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_CS].base;
2060 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2061 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2062 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2063
2064 pCtx->dsHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_DS].base;
2065 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2066 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2067
2068 pCtx->esHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_ES].base;
2069 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2070 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2071
2072 pCtx->fsHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_FS].base;
2073 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2074 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2075
2076 pCtx->gsHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_GS].base;
2077 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2078 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2079
2080 pCtx->ssHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_SS].base;
2081 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2082 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2083
2084 pCtx->ldtrHid.u32Base = (uint32_t)pVM->rem.s.Env.ldt.base;
2085 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2086 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2087
2088 pCtx->trHid.u32Base = (uint32_t)pVM->rem.s.Env.tr.base;
2089 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2090 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2091
2092 /* Sysenter MSR */
2093 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2094 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2095 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2096
2097 remR3TrapClear(pVM);
2098
2099 /*
2100 * Check for traps.
2101 */
2102 if ( pVM->rem.s.Env.exception_index >= 0
2103 && pVM->rem.s.Env.exception_index < 256)
2104 {
2105 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2106 int rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2107 AssertRC(rc);
2108 switch (pVM->rem.s.Env.exception_index)
2109 {
2110 case 0x0e:
2111 TRPMSetFaultAddress(pVM, pCtx->cr2);
2112 /* fallthru */
2113 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2114 case 0x11: case 0x08: /* 0 */
2115 TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
2116 break;
2117 }
2118
2119 }
2120
2121 /*
2122 * We're not longer in REM mode.
2123 */
2124 pVM->rem.s.fInREM = false;
2125 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2126 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2127 return VINF_SUCCESS;
2128}
2129
2130
2131/**
2132 * This is called by the disassembler when it wants to update the cpu state
2133 * before for instance doing a register dump.
2134 */
2135static void remR3StateUpdate(PVM pVM)
2136{
2137 Assert(pVM->rem.s.fInREM);
2138 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2139
2140 /*
2141 * Copy back the registers.
2142 * This is done in the order they are declared in the CPUMCTX structure.
2143 */
2144
2145 /** @todo FOP */
2146 /** @todo FPUIP */
2147 /** @todo CS */
2148 /** @todo FPUDP */
2149 /** @todo DS */
2150 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2151 pCtx->fpu.MXCSR = 0;
2152 pCtx->fpu.MXCSR_MASK = 0;
2153
2154 /** @todo check if FPU/XMM was actually used in the recompiler */
2155 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2156//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2157
2158 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2159 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2160 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2161 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2162 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2163 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2164 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2165
2166 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2167 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2168
2169 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2170 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2171 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2172 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2173 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2174
2175 pCtx->eip = pVM->rem.s.Env.eip;
2176 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2177
2178 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2179 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2180 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2181 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2182
2183 pCtx->dr0 = pVM->rem.s.Env.dr[0];
2184 pCtx->dr1 = pVM->rem.s.Env.dr[1];
2185 pCtx->dr2 = pVM->rem.s.Env.dr[2];
2186 pCtx->dr3 = pVM->rem.s.Env.dr[3];
2187 pCtx->dr4 = pVM->rem.s.Env.dr[4];
2188 pCtx->dr5 = pVM->rem.s.Env.dr[5];
2189 pCtx->dr6 = pVM->rem.s.Env.dr[6];
2190 pCtx->dr7 = pVM->rem.s.Env.dr[7];
2191
2192 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2193 if (pCtx->gdtr.pGdt != (uint32_t)pVM->rem.s.Env.gdt.base)
2194 {
2195 pCtx->gdtr.pGdt = (uint32_t)pVM->rem.s.Env.gdt.base;
2196 STAM_COUNTER_INC(&gStatREMGDTChange);
2197 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2198 }
2199
2200 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2201 if (pCtx->idtr.pIdt != (uint32_t)pVM->rem.s.Env.idt.base)
2202 {
2203 pCtx->idtr.pIdt = (uint32_t)pVM->rem.s.Env.idt.base;
2204 STAM_COUNTER_INC(&gStatREMIDTChange);
2205 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2206 }
2207
2208 if (pCtx->ldtr != pVM->rem.s.Env.ldt.selector)
2209 {
2210 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2211 STAM_COUNTER_INC(&gStatREMLDTRChange);
2212 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2213 }
2214 if (pCtx->tr != pVM->rem.s.Env.tr.selector)
2215 {
2216 pCtx->tr = pVM->rem.s.Env.tr.selector;
2217 STAM_COUNTER_INC(&gStatREMTRChange);
2218 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2219 }
2220
2221 /** @todo These values could still be out of sync! */
2222 pCtx->csHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_CS].base;
2223 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2224 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2225 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2226
2227 pCtx->dsHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_DS].base;
2228 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2229 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2230
2231 pCtx->esHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_ES].base;
2232 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2233 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2234
2235 pCtx->fsHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_FS].base;
2236 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2237 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2238
2239 pCtx->gsHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_GS].base;
2240 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2241 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2242
2243 pCtx->ssHid.u32Base = (uint32_t)pVM->rem.s.Env.segs[R_SS].base;
2244 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2245 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2246
2247 pCtx->ldtrHid.u32Base = (uint32_t)pVM->rem.s.Env.ldt.base;
2248 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2249 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2250
2251 pCtx->trHid.u32Base = (uint32_t)pVM->rem.s.Env.tr.base;
2252 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2253 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xFFFF;
2254
2255 /* Sysenter MSR */
2256 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2257 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2258 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2259}
2260
2261
2262/**
2263 * Update the VMM state information if we're currently in REM.
2264 *
2265 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2266 * we're currently executing in REM and the VMM state is invalid. This method will of
2267 * course check that we're executing in REM before syncing any data over to the VMM.
2268 *
2269 * @param pVM The VM handle.
2270 */
2271REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2272{
2273 if (pVM->rem.s.fInREM)
2274 remR3StateUpdate(pVM);
2275}
2276
2277
2278#undef LOG_GROUP
2279#define LOG_GROUP LOG_GROUP_REM
2280
2281
2282/**
2283 * Notify the recompiler about Address Gate 20 state change.
2284 *
2285 * This notification is required since A20 gate changes are
2286 * initialized from a device driver and the VM might just as
2287 * well be in REM mode as in RAW mode.
2288 *
2289 * @param pVM VM handle.
2290 * @param fEnable True if the gate should be enabled.
2291 * False if the gate should be disabled.
2292 */
2293REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2294{
2295 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2296 VM_ASSERT_EMT(pVM);
2297 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2298}
2299
2300
2301/**
2302 * Replays the invalidated recorded pages.
2303 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2304 *
2305 * @param pVM VM handle.
2306 */
2307REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2308{
2309 VM_ASSERT_EMT(pVM);
2310
2311 /*
2312 * Sync the required registers.
2313 */
2314 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2315 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2316 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2317 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2318
2319 /*
2320 * Replay the flushes.
2321 */
2322 pVM->rem.s.fIgnoreInvlPg = true;
2323 RTUINT i;
2324 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2325 {
2326 Log2(("REMR3ReplayInvalidatedPages: invlpg %VGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2327 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2328 }
2329 pVM->rem.s.fIgnoreInvlPg = false;
2330 pVM->rem.s.cInvalidatedPages = 0;
2331}
2332
2333
2334/**
2335 * Replays the invalidated recorded pages.
2336 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2337 *
2338 * @param pVM VM handle.
2339 */
2340REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2341{
2342 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2343 VM_ASSERT_EMT(pVM);
2344
2345 /*
2346 * Replay the flushes.
2347 */
2348 RTUINT i;
2349 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2350 pVM->rem.s.cHandlerNotifications = 0;
2351 for (i = 0; i < c; i++)
2352 {
2353 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2354 switch (pRec->enmKind)
2355 {
2356 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2357 REMR3NotifyHandlerPhysicalRegister(pVM,
2358 pRec->u.PhysicalRegister.enmType,
2359 pRec->u.PhysicalRegister.GCPhys,
2360 pRec->u.PhysicalRegister.cb,
2361 pRec->u.PhysicalRegister.fHasHCHandler);
2362 break;
2363
2364 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2365 REMR3NotifyHandlerPhysicalDeregister(pVM,
2366 pRec->u.PhysicalDeregister.enmType,
2367 pRec->u.PhysicalDeregister.GCPhys,
2368 pRec->u.PhysicalDeregister.cb,
2369 pRec->u.PhysicalDeregister.fHasHCHandler,
2370 pRec->u.PhysicalDeregister.pvHCPtr);
2371 break;
2372
2373 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2374 REMR3NotifyHandlerPhysicalModify(pVM,
2375 pRec->u.PhysicalModify.enmType,
2376 pRec->u.PhysicalModify.GCPhysOld,
2377 pRec->u.PhysicalModify.GCPhysNew,
2378 pRec->u.PhysicalModify.cb,
2379 pRec->u.PhysicalModify.fHasHCHandler,
2380 pRec->u.PhysicalModify.pvHCPtr);
2381 break;
2382
2383 default:
2384 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2385 break;
2386 }
2387 }
2388}
2389
2390
2391/**
2392 * Notify REM about changed code page.
2393 *
2394 * @returns VBox status code.
2395 * @param pVM VM handle.
2396 * @param pvCodePage Code page address
2397 */
2398REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
2399{
2400 int rc;
2401 RTGCPHYS PhysGC;
2402 uint64_t flags;
2403
2404 VM_ASSERT_EMT(pVM);
2405
2406 /*
2407 * Get the physical page address.
2408 */
2409 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2410 if (rc == VINF_SUCCESS)
2411 {
2412 /*
2413 * Sync the required registers and flush the whole page.
2414 * (Easier to do the whole page than notifying it about each physical
2415 * byte that was changed.
2416 */
2417 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2418 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2419 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2420 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2421
2422 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2423 }
2424 return VINF_SUCCESS;
2425}
2426
2427/**
2428 * Notification about a successful MMR3PhysRegister() call.
2429 *
2430 * @param pVM VM handle.
2431 * @param GCPhys The physical address the RAM.
2432 * @param cb Size of the memory.
2433 * @param pvRam The HC address of the RAM.
2434 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2435 */
2436REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvRam, unsigned fFlags)
2437{
2438 Log(("REMR3NotifyPhysRamRegister: GCPhys=%VGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2439 VM_ASSERT_EMT(pVM);
2440
2441 /*
2442 * Validate input - we trust the caller.
2443 */
2444 Assert(!GCPhys || pvRam);
2445 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
2446 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2447 Assert(cb);
2448 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2449
2450 /*
2451 * Base ram?
2452 */
2453 if (!GCPhys)
2454 {
2455 phys_ram_size = cb;
2456 phys_ram_dirty_size = cb >> PAGE_SHIFT;
2457#ifndef VBOX_STRICT
2458 phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
2459 AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", phys_ram_dirty_size));
2460#else /* VBOX_STRICT: allocate a full map and make the out of bounds pages invalid. */
2461 phys_ram_dirty = RTMemPageAlloc(_4G >> PAGE_SHIFT);
2462 AssertReleaseMsg(phys_ram_dirty, ("failed to allocate %d bytes of dirty bytes\n", _4G >> PAGE_SHIFT));
2463 uint32_t cbBitmap = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
2464 int rc = RTMemProtect(phys_ram_dirty + cbBitmap, (_4G >> PAGE_SHIFT) - cbBitmap, RTMEM_PROT_NONE);
2465 AssertRC(rc);
2466 phys_ram_dirty += cbBitmap - phys_ram_dirty_size;
2467#endif
2468 memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
2469 }
2470
2471 /*
2472 * Register the ram.
2473 */
2474 Assert(!pVM->rem.s.fIgnoreAll);
2475 pVM->rem.s.fIgnoreAll = true;
2476
2477 if (!GCPhys)
2478 cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
2479 else
2480 {
2481 if (fFlags & MM_RAM_FLAGS_RESERVED)
2482 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2483 else
2484 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2485 }
2486 Assert(pVM->rem.s.fIgnoreAll);
2487 pVM->rem.s.fIgnoreAll = false;
2488}
2489
2490
2491/**
2492 * Notification about a successful PGMR3PhysRegisterChunk() call.
2493 *
2494 * @param pVM VM handle.
2495 * @param GCPhys The physical address the RAM.
2496 * @param cb Size of the memory.
2497 * @param pvRam The HC address of the RAM.
2498 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2499 */
2500REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2501{
2502 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%VGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2503 VM_ASSERT_EMT(pVM);
2504
2505 /*
2506 * Validate input - we trust the caller.
2507 */
2508 Assert(pvRam);
2509 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2510 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2511 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2512 Assert(fFlags == 0 /* normal RAM */);
2513 Assert(!pVM->rem.s.fIgnoreAll);
2514 pVM->rem.s.fIgnoreAll = true;
2515
2516 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2517
2518 Assert(pVM->rem.s.fIgnoreAll);
2519 pVM->rem.s.fIgnoreAll = false;
2520}
2521
2522
2523/**
2524 * Grows dynamically allocated guest RAM.
2525 * Will raise a fatal error if the operation fails.
2526 *
2527 * @param physaddr The physical address.
2528 */
2529void remR3GrowDynRange(unsigned long physaddr)
2530{
2531 int rc;
2532 PVM pVM = cpu_single_env->pVM;
2533
2534 Log(("remR3GrowDynRange %VGp\n", physaddr));
2535 rc = PGM3PhysGrowRange(pVM, (RTGCPHYS)physaddr);
2536 if (VBOX_SUCCESS(rc))
2537 return;
2538
2539 LogRel(("\nUnable to allocate guest RAM chunk at %VGp\n", physaddr));
2540 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %VGp\n", physaddr);
2541 AssertFatalFailed();
2542}
2543
2544
2545/**
2546 * Notification about a successful MMR3PhysRomRegister() call.
2547 *
2548 * @param pVM VM handle.
2549 * @param GCPhys The physical address of the ROM.
2550 * @param cb The size of the ROM.
2551 * @param pvCopy Pointer to the ROM copy.
2552 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2553 * This function will be called when ever the protection of the
2554 * shadow ROM changes (at reset and end of POST).
2555 */
2556REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2557{
2558 Log(("REMR3NotifyPhysRomRegister: GCPhys=%VGp cb=%d pvCopy=%p fShadow=%RTbool\n", GCPhys, cb, pvCopy, fShadow));
2559 VM_ASSERT_EMT(pVM);
2560
2561 /*
2562 * Validate input - we trust the caller.
2563 */
2564 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2565 Assert(cb);
2566 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2567 Assert(pvCopy);
2568 Assert(RT_ALIGN_P(pvCopy, PAGE_SIZE) == pvCopy);
2569
2570 /*
2571 * Register the rom.
2572 */
2573 Assert(!pVM->rem.s.fIgnoreAll);
2574 pVM->rem.s.fIgnoreAll = true;
2575
2576 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2577
2578 Log2(("%.64Vhxd\n", (char *)pvCopy + cb - 64));
2579
2580 Assert(pVM->rem.s.fIgnoreAll);
2581 pVM->rem.s.fIgnoreAll = false;
2582}
2583
2584
2585/**
2586 * Notification about a successful MMR3PhysRegister() call.
2587 *
2588 * @param pVM VM Handle.
2589 * @param GCPhys Start physical address.
2590 * @param cb The size of the range.
2591 */
2592REMR3DECL(void) REMR3NotifyPhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2593{
2594 Log(("REMR3NotifyPhysReserve: GCPhys=%VGp cb=%d\n", GCPhys, cb));
2595 VM_ASSERT_EMT(pVM);
2596
2597 /*
2598 * Validate input - we trust the caller.
2599 */
2600 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2601 Assert(cb);
2602 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2603
2604 /*
2605 * Unassigning the memory.
2606 */
2607 Assert(!pVM->rem.s.fIgnoreAll);
2608 pVM->rem.s.fIgnoreAll = true;
2609
2610 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2611
2612 Assert(pVM->rem.s.fIgnoreAll);
2613 pVM->rem.s.fIgnoreAll = false;
2614}
2615
2616
2617/**
2618 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2619 *
2620 * @param pVM VM Handle.
2621 * @param enmType Handler type.
2622 * @param GCPhys Handler range address.
2623 * @param cb Size of the handler range.
2624 * @param fHasHCHandler Set if the handler has a HC callback function.
2625 *
2626 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2627 * Handler memory type to memory which has no HC handler.
2628 */
2629REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2630{
2631 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%VGp cb=%d fHasHCHandler=%d\n",
2632 enmType, GCPhys, cb, fHasHCHandler));
2633 VM_ASSERT_EMT(pVM);
2634 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2635 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2636
2637 if (pVM->rem.s.cHandlerNotifications)
2638 REMR3ReplayHandlerNotifications(pVM);
2639
2640 Assert(!pVM->rem.s.fIgnoreAll);
2641 pVM->rem.s.fIgnoreAll = true;
2642
2643 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2644 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2645 else if (fHasHCHandler)
2646 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2647
2648 Assert(pVM->rem.s.fIgnoreAll);
2649 pVM->rem.s.fIgnoreAll = false;
2650}
2651
2652
2653/**
2654 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2655 *
2656 * @param pVM VM Handle.
2657 * @param enmType Handler type.
2658 * @param GCPhys Handler range address.
2659 * @param cb Size of the handler range.
2660 * @param fHasHCHandler Set if the handler has a HC callback function.
2661 * @param pvHCPtr The HC virtual address corresponding to GCPhys if available.
2662 */
2663REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, void *pvHCPtr)
2664{
2665 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%VGp cb=%d fHasHCHandler=%d pvHCPtr=%p RAM=%08x\n",
2666 enmType, GCPhys, cb, fHasHCHandler, pvHCPtr, MMR3PhysGetRamSize(pVM)));
2667 VM_ASSERT_EMT(pVM);
2668
2669 if (pVM->rem.s.cHandlerNotifications)
2670 REMR3ReplayHandlerNotifications(pVM);
2671
2672 Assert(!pVM->rem.s.fIgnoreAll);
2673 pVM->rem.s.fIgnoreAll = true;
2674
2675 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2676 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2677 else if (fHasHCHandler)
2678 {
2679 if (!pvHCPtr)
2680 {
2681 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2682 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2683 }
2684 else
2685 {
2686 /* This is not perfect, but it'll do for PD monitoring... */
2687 Assert(cb == PAGE_SIZE);
2688 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2689 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2690 }
2691 }
2692
2693 Assert(pVM->rem.s.fIgnoreAll);
2694 pVM->rem.s.fIgnoreAll = false;
2695}
2696
2697
2698/**
2699 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2700 *
2701 * @param pVM VM Handle.
2702 * @param enmType Handler type.
2703 * @param GCPhysOld Old handler range address.
2704 * @param GCPhysNew New handler range address.
2705 * @param cb Size of the handler range.
2706 * @param fHasHCHandler Set if the handler has a HC callback function.
2707 * @param pvHCPtr The HC virtual address corresponding to GCPhys if available.
2708 */
2709REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, void *pvHCPtr)
2710{
2711 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%VGp GCPhysNew=%VGp cb=%d fHasHCHandler=%d pvHCPtr=%p\n",
2712 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, pvHCPtr));
2713 VM_ASSERT_EMT(pVM);
2714 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
2715
2716 if (pVM->rem.s.cHandlerNotifications)
2717 REMR3ReplayHandlerNotifications(pVM);
2718
2719 if (fHasHCHandler)
2720 {
2721 Assert(!pVM->rem.s.fIgnoreAll);
2722 pVM->rem.s.fIgnoreAll = true;
2723
2724 /*
2725 * Reset the old page.
2726 */
2727 if (!pvHCPtr)
2728 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
2729 else
2730 {
2731 /* This is not perfect, but it'll do for PD monitoring... */
2732 Assert(cb == PAGE_SIZE);
2733 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
2734 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
2735 }
2736
2737 /*
2738 * Update the new page.
2739 */
2740 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
2741 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2742 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
2743
2744 Assert(pVM->rem.s.fIgnoreAll);
2745 pVM->rem.s.fIgnoreAll = false;
2746 }
2747}
2748
2749
2750/**
2751 * Checks if we're handling access to this page or not.
2752 *
2753 * @returns true if we're trapping access.
2754 * @returns false if we aren't.
2755 * @param pVM The VM handle.
2756 * @param GCPhys The physical address.
2757 *
2758 * @remark This function will only work correctly in VBOX_STRICT builds!
2759 */
2760REMDECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
2761{
2762#ifdef VBOX_STRICT
2763 if (pVM->rem.s.cHandlerNotifications)
2764 REMR3ReplayHandlerNotifications(pVM);
2765
2766 unsigned long off = get_phys_page_offset(GCPhys);
2767 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
2768 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
2769 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
2770#else
2771 return false;
2772#endif
2773}
2774
2775
2776/**
2777 * Deals with a rare case in get_phys_addr_code where the code
2778 * is being monitored.
2779 *
2780 * It could also be an MMIO page, in which case we will raise a fatal error.
2781 *
2782 * @returns The physical address corresponding to addr.
2783 * @param env The cpu environment.
2784 * @param addr The virtual address.
2785 * @param pTLBEntry The TLB entry.
2786 */
2787target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry)
2788{
2789 PVM pVM = env->pVM;
2790 if ((pTLBEntry->addr_code & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
2791 {
2792 target_ulong ret = pTLBEntry->addend + addr;
2793 AssertMsg2("remR3PhysGetPhysicalAddressCode: addr=%VGv addr_code=%VGv addend=%VGp ret=%VGp\n",
2794 (RTGCPTR)addr, (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, ret);
2795 return ret;
2796 }
2797 LogRel(("\nTrying to execute code with memory type addr_code=%VGv addend=%VGp at %VGv! (iHandlerMemType=%#x iMMIOMemType=%#x)\n"
2798 "*** handlers\n",
2799 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType));
2800 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
2801 LogRel(("*** mmio\n"));
2802 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
2803 LogRel(("*** phys\n"));
2804 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
2805 cpu_abort(env, "Trying to execute code with memory type addr_code=%VGv addend=%VGp at %VGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
2806 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
2807 AssertFatalFailed();
2808}
2809
2810
/** Validate the physical address passed to the read functions.
 * Useful for finding non-guest-ram reads/writes.
 * NOTE(review): AssertMsg presumably compiles to nothing when assertions are
 * disabled, so this only bites in strict/debug builds - confirm. */
#if 1 /* disable if it becomes bothersome... */
# define VBOX_CHECK_ADDR(GCPhys)    AssertMsg(PGMPhysIsGCPhysValid(cpu_single_env->pVM, (GCPhys)), ("%VGp\n", (GCPhys)))
#else
# define VBOX_CHECK_ADDR(GCPhys)    do { } while (0)
#endif
2818
2819/**
2820 * Read guest RAM and ROM.
2821 *
2822 * @param SrcGCPhys The source address (guest physical).
2823 * @param pvDst The destination address.
2824 * @param cb Number of bytes
2825 */
2826void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
2827{
2828 STAM_PROFILE_ADV_START(&gStatMemRead, a);
2829 VBOX_CHECK_ADDR(SrcGCPhys);
2830 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
2831 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
2832}
2833
2834
2835/**
2836 * Read guest RAM and ROM, unsigned 8-bit.
2837 *
2838 * @param SrcGCPhys The source address (guest physical).
2839 */
2840uint8_t remR3PhysReadU8(RTGCPHYS SrcGCPhys)
2841{
2842 uint8_t val;
2843 STAM_PROFILE_ADV_START(&gStatMemRead, a);
2844 VBOX_CHECK_ADDR(SrcGCPhys);
2845 val = PGMR3PhysReadByte(cpu_single_env->pVM, SrcGCPhys);
2846 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
2847 return val;
2848}
2849
2850
2851/**
2852 * Read guest RAM and ROM, signed 8-bit.
2853 *
2854 * @param SrcGCPhys The source address (guest physical).
2855 */
2856int8_t remR3PhysReadS8(RTGCPHYS SrcGCPhys)
2857{
2858 int8_t val;
2859 STAM_PROFILE_ADV_START(&gStatMemRead, a);
2860 VBOX_CHECK_ADDR(SrcGCPhys);
2861 val = PGMR3PhysReadByte(cpu_single_env->pVM, SrcGCPhys);
2862 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
2863 return val;
2864}
2865
2866
2867/**
2868 * Read guest RAM and ROM, unsigned 16-bit.
2869 *
2870 * @param SrcGCPhys The source address (guest physical).
2871 */
2872uint16_t remR3PhysReadU16(RTGCPHYS SrcGCPhys)
2873{
2874 uint16_t val;
2875 STAM_PROFILE_ADV_START(&gStatMemRead, a);
2876 VBOX_CHECK_ADDR(SrcGCPhys);
2877 val = PGMR3PhysReadWord(cpu_single_env->pVM, SrcGCPhys);
2878 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
2879 return val;
2880}
2881
2882
2883/**
2884 * Read guest RAM and ROM, signed 16-bit.
2885 *
2886 * @param SrcGCPhys The source address (guest physical).
2887 */
2888int16_t remR3PhysReadS16(RTGCPHYS SrcGCPhys)
2889{
2890 uint16_t val;
2891 STAM_PROFILE_ADV_START(&gStatMemRead, a);
2892 VBOX_CHECK_ADDR(SrcGCPhys);
2893 val = PGMR3PhysReadWord(cpu_single_env->pVM, SrcGCPhys);
2894 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
2895 return val;
2896}
2897
2898
2899/**
2900 * Read guest RAM and ROM, unsigned 32-bit.
2901 *
2902 * @param SrcGCPhys The source address (guest physical).
2903 */
2904uint32_t remR3PhysReadU32(RTGCPHYS SrcGCPhys)
2905{
2906 uint32_t val;
2907 STAM_PROFILE_ADV_START(&gStatMemRead, a);
2908 VBOX_CHECK_ADDR(SrcGCPhys);
2909 val = PGMR3PhysReadDword(cpu_single_env->pVM, SrcGCPhys);
2910 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
2911 return val;
2912}
2913
2914
2915/**
2916 * Read guest RAM and ROM, signed 32-bit.
2917 *
2918 * @param SrcGCPhys The source address (guest physical).
2919 */
2920int32_t remR3PhysReadS32(RTGCPHYS SrcGCPhys)
2921{
2922 int32_t val;
2923 STAM_PROFILE_ADV_START(&gStatMemRead, a);
2924 VBOX_CHECK_ADDR(SrcGCPhys);
2925 val = PGMR3PhysReadDword(cpu_single_env->pVM, SrcGCPhys);
2926 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
2927 return val;
2928}
2929
2930
2931/**
2932 * Read guest RAM and ROM, unsigned 64-bit.
2933 *
2934 * @param SrcGCPhys The source address (guest physical).
2935 */
2936uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
2937{
2938 uint64_t val;
2939 STAM_PROFILE_ADV_START(&gStatMemRead, a);
2940 VBOX_CHECK_ADDR(SrcGCPhys);
2941 val = PGMR3PhysReadDword(cpu_single_env->pVM, SrcGCPhys)
2942 | ((uint64_t)PGMR3PhysReadDword(cpu_single_env->pVM, SrcGCPhys + 4) << 32); /** @todo fix me! */
2943 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
2944 return val;
2945}
2946
2947
2948/**
2949 * Write guest RAM.
2950 *
2951 * @param DstGCPhys The destination address (guest physical).
2952 * @param pvSrc The source address.
2953 * @param cb Number of bytes to write
2954 */
2955void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
2956{
2957 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
2958 VBOX_CHECK_ADDR(DstGCPhys);
2959 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
2960 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
2961}
2962
2963
2964/**
2965 * Write guest RAM, unsigned 8-bit.
2966 *
2967 * @param DstGCPhys The destination address (guest physical).
2968 * @param val Value
2969 */
2970void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
2971{
2972 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
2973 VBOX_CHECK_ADDR(DstGCPhys);
2974 PGMR3PhysWriteByte(cpu_single_env->pVM, DstGCPhys, val);
2975 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
2976}
2977
2978
2979/**
2980 * Write guest RAM, unsigned 8-bit.
2981 *
2982 * @param DstGCPhys The destination address (guest physical).
2983 * @param val Value
2984 */
2985void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
2986{
2987 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
2988 VBOX_CHECK_ADDR(DstGCPhys);
2989 PGMR3PhysWriteWord(cpu_single_env->pVM, DstGCPhys, val);
2990 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
2991}
2992
2993
2994/**
2995 * Write guest RAM, unsigned 32-bit.
2996 *
2997 * @param DstGCPhys The destination address (guest physical).
2998 * @param val Value
2999 */
3000void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3001{
3002 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3003 VBOX_CHECK_ADDR(DstGCPhys);
3004 PGMR3PhysWriteDword(cpu_single_env->pVM, DstGCPhys, val);
3005 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3006}
3007
3008
3009/**
3010 * Write guest RAM, unsigned 64-bit.
3011 *
3012 * @param DstGCPhys The destination address (guest physical).
3013 * @param val Value
3014 */
3015void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3016{
3017 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3018 VBOX_CHECK_ADDR(DstGCPhys);
3019 PGMR3PhysWriteDword(cpu_single_env->pVM, DstGCPhys, (uint32_t)val); /** @todo add U64 interface. */
3020 PGMR3PhysWriteDword(cpu_single_env->pVM, DstGCPhys + 4, val >> 32);
3021 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3022}
3023
3024#undef LOG_GROUP
3025#define LOG_GROUP LOG_GROUP_REM_MMIO
3026
/** Read MMIO memory, 8-bit. qemu callback; pvVM is the VM handle, dispatches to IOM. */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU8: GCPhys=%VGp -> %02x\n", GCPhys, u32));
    return u32;
}
3036
/** Read MMIO memory, 16-bit. qemu callback; pvVM is the VM handle, dispatches to IOM. */
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU16: GCPhys=%VGp -> %04x\n", GCPhys, u32));
    return u32;
}
3046
/** Read MMIO memory, 32-bit. qemu callback; pvVM is the VM handle, dispatches to IOM. */
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU32: GCPhys=%VGp -> %08x\n", GCPhys, u32));
    return u32;
}
3056
/** Write to MMIO memory, 8-bit. qemu callback; pvVM is the VM handle, dispatches to IOM. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3MMIOWriteU8: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    int rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
}
3064
/** Write to MMIO memory, 16-bit. qemu callback; pvVM is the VM handle, dispatches to IOM. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3MMIOWriteU16: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    int rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
}
3072
/** Write to MMIO memory, 32-bit. qemu callback; pvVM is the VM handle, dispatches to IOM. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3MMIOWriteU32: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    int rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Vrc\n", rc)); NOREF(rc);
}
3080
3081
3082#undef LOG_GROUP
3083#define LOG_GROUP LOG_GROUP_REM_HANDLER
3084
3085/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3086
/** Handler-memory read, 8-bit: routed through PGMPhysRead so access handlers fire. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    Log2(("remR3HandlerReadU8: GCPhys=%VGp\n", GCPhys));
    uint8_t u8;
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}
3094
/** Handler-memory read, 16-bit: routed through PGMPhysRead so access handlers fire. */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    Log2(("remR3HandlerReadU16: GCPhys=%VGp\n", GCPhys));
    uint16_t u16;
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}
3102
/** Handler-memory read, 32-bit: routed through PGMPhysRead so access handlers fire. */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    Log2(("remR3HandlerReadU32: GCPhys=%VGp\n", GCPhys));
    uint32_t u32;
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}
3110
/** Handler-memory write, 8-bit: writes the low byte of u32 via PGMPhysWrite.
 * NOTE(review): passing &u32 with sizeof(uint8_t) assumes little-endian host layout. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3116
/** Handler-memory write, 16-bit: writes the low word of u32 via PGMPhysWrite.
 * NOTE(review): passing &u32 with sizeof(uint16_t) assumes little-endian host layout. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3122
/** Handler-memory write, 32-bit: routed through PGMPhysWrite so access handlers fire. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%VGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3128
3129/* -+- disassembly -+- */
3130
3131#undef LOG_GROUP
3132#define LOG_GROUP LOG_GROUP_REM_DISAS
3133
3134
3135/**
3136 * Enables or disables singled stepped disassembly.
3137 *
3138 * @returns VBox status code.
3139 * @param pVM VM handle.
3140 * @param fEnable To enable set this flag, to disable clear it.
3141 */
3142static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3143{
3144 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3145 VM_ASSERT_EMT(pVM);
3146
3147 if (fEnable)
3148 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3149 else
3150 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3151 return VINF_SUCCESS;
3152}
3153
3154
3155/**
3156 * Enables or disables singled stepped disassembly.
3157 *
3158 * @returns VBox status code.
3159 * @param pVM VM handle.
3160 * @param fEnable To enable set this flag, to disable clear it.
3161 */
3162REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3163{
3164 PVMREQ pReq;
3165 int rc;
3166
3167 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3168 if (VM_IS_EMT(pVM))
3169 return remR3DisasEnableStepping(pVM, fEnable);
3170
3171 rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3172 AssertRC(rc);
3173 if (VBOX_SUCCESS(rc))
3174 rc = pReq->iStatus;
3175 VMR3ReqFree(pReq);
3176 return rc;
3177}
3178
3179
3180#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3181/**
3182 * External Debugger Command: .remstep [on|off|1|0]
3183 */
3184static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3185{
3186 bool fEnable;
3187 int rc;
3188
3189 /* print status */
3190 if (cArgs == 0)
3191 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3192 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3193
3194 /* convert the argument and change the mode. */
3195 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3196 if (VBOX_FAILURE(rc))
3197 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3198 rc = REMR3DisasEnableStepping(pVM, fEnable);
3199 if (VBOX_FAILURE(rc))
3200 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3201 return rc;
3202}
3203#endif
3204
3205
3206/**
3207 * Disassembles n instructions and prints them to the log.
3208 *
3209 * @returns Success indicator.
3210 * @param env Pointer to the recompiler CPU structure.
3211 * @param f32BitCode Indicates that whether or not the code should
3212 * be disassembled as 16 or 32 bit. If -1 the CS
3213 * selector will be inspected.
3214 * @param nrInstructions Nr of instructions to disassemble
3215 * @param pszPrefix
3216 * @remark not currently used for anything but ad-hoc debugging.
3217 */
3218bool remR3DisasBlock(CPUState *env, int f32BitCode, int nrInstructions, char *pszPrefix)
3219{
3220 int i;
3221
3222 /*
3223 * Determin 16/32 bit mode.
3224 */
3225 if (f32BitCode == -1)
3226 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3227
3228 /*
3229 * Convert cs:eip to host context address.
3230 * We don't care to much about cross page correctness presently.
3231 */
3232 RTGCPTR GCPtrPC = env->segs[R_CS].base + env->eip;
3233 void *pvPC;
3234 if (f32BitCode && (env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3235 {
3236 /* convert eip to physical address. */
3237 int rc = PGMPhysGCPtr2HCPtrByGstCR3(env->pVM,
3238 GCPtrPC,
3239 env->cr[3],
3240 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE), /** @todo add longmode flag */
3241 &pvPC);
3242 if (VBOX_FAILURE(rc))
3243 {
3244 if (!PATMIsPatchGCAddr(env->pVM, GCPtrPC))
3245 return false;
3246 pvPC = (char *)PATMR3QueryPatchMemHC(env->pVM, NULL)
3247 + (GCPtrPC - PATMR3QueryPatchMemGC(env->pVM, NULL));
3248 }
3249 }
3250 else
3251 {
3252 /* physical address */
3253 int rc = PGMPhysGCPhys2HCPtr(env->pVM, (RTGCPHYS)GCPtrPC, nrInstructions * 16, &pvPC);
3254 if (VBOX_FAILURE(rc))
3255 return false;
3256 }
3257
3258 /*
3259 * Disassemble.
3260 */
3261 RTINTPTR off = env->eip - (RTINTPTR)pvPC;
3262 DISCPUSTATE Cpu;
3263 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3264 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3265 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3266 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3267 //Cpu.dwUserData[2] = GCPtrPC;
3268
3269 for (i=0;i<nrInstructions;i++)
3270 {
3271 char szOutput[256];
3272 uint32_t cbOp;
3273 if (!DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0]))
3274 return false;
3275 if (pszPrefix)
3276 Log(("%s: %s", pszPrefix, szOutput));
3277 else
3278 Log(("%s", szOutput));
3279
3280 pvPC += cbOp;
3281 }
3282 return true;
3283}
3284
3285
3286/** @todo need to test the new code, using the old code in the mean while. */
3287#define USE_OLD_DUMP_AND_DISASSEMBLY
3288
3289/**
3290 * Disassembles one instruction and prints it to the log.
3291 *
3292 * @returns Success indicator.
3293 * @param env Pointer to the recompiler CPU structure.
3294 * @param f32BitCode Indicates that whether or not the code should
3295 * be disassembled as 16 or 32 bit. If -1 the CS
3296 * selector will be inspected.
3297 * @param pszPrefix
3298 */
3299bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3300{
3301#ifdef USE_OLD_DUMP_AND_DISASSEMBLY
3302 PVM pVM = env->pVM;
3303
3304 /*
3305 * Determin 16/32 bit mode.
3306 */
3307 if (f32BitCode == -1)
3308 f32BitCode = !!(env->segs[R_CS].flags & X86_DESC_DB); /** @todo is this right?!!?!?!?!? */
3309
3310 /*
3311 * Log registers
3312 */
3313 if (LogIs2Enabled())
3314 {
3315 remR3StateUpdate(pVM);
3316 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3317 }
3318
3319 /*
3320 * Convert cs:eip to host context address.
3321 * We don't care to much about cross page correctness presently.
3322 */
3323 RTGCPTR GCPtrPC = env->segs[R_CS].base + env->eip;
3324 void *pvPC;
3325 if ((env->cr[0] & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG))
3326 {
3327 /* convert eip to physical address. */
3328 int rc = PGMPhysGCPtr2HCPtrByGstCR3(pVM,
3329 GCPtrPC,
3330 env->cr[3],
3331 env->cr[4] & (X86_CR4_PSE | X86_CR4_PAE),
3332 &pvPC);
3333 if (VBOX_FAILURE(rc))
3334 {
3335 if (!PATMIsPatchGCAddr(pVM, GCPtrPC))
3336 return false;
3337 pvPC = (char *)PATMR3QueryPatchMemHC(pVM, NULL)
3338 + (GCPtrPC - PATMR3QueryPatchMemGC(pVM, NULL));
3339 }
3340 }
3341 else
3342 {
3343
3344 /* physical address */
3345 int rc = PGMPhysGCPhys2HCPtr(pVM, (RTGCPHYS)GCPtrPC, 16, &pvPC);
3346 if (VBOX_FAILURE(rc))
3347 return false;
3348 }
3349
3350 /*
3351 * Disassemble.
3352 */
3353 RTINTPTR off = env->eip - (RTINTPTR)pvPC;
3354 DISCPUSTATE Cpu;
3355 Cpu.mode = f32BitCode ? CPUMODE_32BIT : CPUMODE_16BIT;
3356 Cpu.pfnReadBytes = NULL; /** @todo make cs:eip reader for the disassembler. */
3357 //Cpu.dwUserData[0] = (uintptr_t)pVM;
3358 //Cpu.dwUserData[1] = (uintptr_t)pvPC;
3359 //Cpu.dwUserData[2] = GCPtrPC;
3360 char szOutput[256];
3361 uint32_t cbOp;
3362 if (!DISInstr(&Cpu, (uintptr_t)pvPC, off, &cbOp, &szOutput[0]))
3363 return false;
3364
3365 if (!f32BitCode)
3366 {
3367 if (pszPrefix)
3368 Log(("%s: %04X:%s", pszPrefix, env->segs[R_CS].selector, szOutput));
3369 else
3370 Log(("%04X:%s", env->segs[R_CS].selector, szOutput));
3371 }
3372 else
3373 {
3374 if (pszPrefix)
3375 Log(("%s: %s", pszPrefix, szOutput));
3376 else
3377 Log(("%s", szOutput));
3378 }
3379 return true;
3380
3381#else /* !USE_OLD_DUMP_AND_DISASSEMBLY */
3382 PVM pVM = env->pVM;
3383 const bool fLog = LogIsEnabled();
3384 const bool fLog2 = LogIs2Enabled();
3385 int rc = VINF_SUCCESS;
3386
3387 /*
3388 * Don't bother if there ain't any log output to do.
3389 */
3390 if (!fLog && !fLog2)
3391 return true;
3392
3393 /*
3394 * Update the state so DBGF reads the correct register values.
3395 */
3396 remR3StateUpdate(pVM);
3397
3398 /*
3399 * Log registers if requested.
3400 */
3401 if (!fLog2)
3402 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3403
3404 /*
3405 * Disassemble to log.
3406 */
3407 if (fLog)
3408 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3409
3410 return VBOX_SUCCESS(rc);
3411#endif
3412}
3413
3414
3415/**
3416 * Disassemble recompiled code.
3417 *
3418 * @param phFileIgnored Ignored, logfile usually.
3419 * @param pvCode Pointer to the code block.
3420 * @param cb Size of the code block.
3421 */
3422void disas(FILE *phFileIgnored, void *pvCode, unsigned long cb)
3423{
3424 if (LogIs2Enabled())
3425 {
3426 unsigned off = 0;
3427 char szOutput[256];
3428 DISCPUSTATE Cpu = {0};
3429 Cpu.mode = CPUMODE_32BIT;
3430
3431 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3432 while (off < cb)
3433 {
3434 uint32_t cbInstr;
3435 if (DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput))
3436 RTLogPrintf("%s", szOutput);
3437 else
3438 {
3439 RTLogPrintf("disas error\n");
3440 cbInstr = 1;
3441 }
3442 off += cbInstr;
3443 }
3444 }
3445 NOREF(phFileIgnored);
3446}
3447
3448
3449/**
3450 * Disassemble guest code.
3451 *
3452 * @param phFileIgnored Ignored, logfile usually.
3453 * @param uCode The guest address of the code to disassemble. (flat?)
3454 * @param cb Number of bytes to disassemble.
3455 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3456 */
3457void target_disas(FILE *phFileIgnored, target_ulong uCode, target_ulong cb, int fFlags)
3458{
3459 if (LogIs2Enabled())
3460 {
3461 PVM pVM = cpu_single_env->pVM;
3462
3463 /*
3464 * Update the state so DBGF reads the correct register values (flags).
3465 */
3466 remR3StateUpdate(pVM);
3467
3468 /*
3469 * Do the disassembling.
3470 */
3471 RTLogPrintf("Guest Code: PC=%VGp #VGp (%VGp) bytes fFlags=%d\n", uCode, cb, cb, fFlags);
3472 RTSEL cs = cpu_single_env->segs[R_CS].selector;
3473 RTGCUINTPTR eip = uCode - cpu_single_env->segs[R_CS].base;
3474 for (;;)
3475 {
3476 char szBuf[256];
3477 uint32_t cbInstr;
3478 int rc = DBGFR3DisasInstrEx(pVM,
3479 cs,
3480 eip,
3481 0,
3482 szBuf, sizeof(szBuf),
3483 &cbInstr);
3484 if (VBOX_SUCCESS(rc))
3485 RTLogPrintf("%VGp %s\n", uCode, szBuf);
3486 else
3487 {
3488 RTLogPrintf("%VGp %04x:%VGp: %s\n", uCode, cs, eip, szBuf);
3489 cbInstr = 1;
3490 }
3491
3492 /* next */
3493 if (cb <= cbInstr)
3494 break;
3495 cb -= cbInstr;
3496 uCode += cbInstr;
3497 eip += cbInstr;
3498 }
3499 }
3500 NOREF(phFileIgnored);
3501}
3502
3503
3504/**
3505 * Looks up a guest symbol.
3506 *
3507 * @returns Pointer to symbol name. This is a static buffer.
3508 * @param orig_addr The address in question.
3509 */
3510const char *lookup_symbol(target_ulong orig_addr)
3511{
3512 RTGCINTPTR off = 0;
3513 DBGFSYMBOL Sym;
3514 PVM pVM = cpu_single_env->pVM;
3515 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3516 if (VBOX_SUCCESS(rc))
3517 {
3518 static char szSym[sizeof(Sym.szName) + 48];
3519 if (!off)
3520 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3521 else if (off > 0)
3522 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3523 else
3524 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3525 return szSym;
3526 }
3527 return "<N/A>";
3528}
3529
3530
3531#undef LOG_GROUP
3532#define LOG_GROUP LOG_GROUP_REM
3533
3534
3535/* -+- FF notifications -+- */
3536
3537
3538/**
3539 * Notification about a pending interrupt.
3540 *
3541 * @param pVM VM Handle.
3542 * @param u8Interrupt Interrupt
3543 * @thread The emulation thread.
3544 */
3545REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3546{
3547 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3548 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3549}
3550
3551/**
3552 * Notification about a pending interrupt.
3553 *
3554 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3555 * @param pVM VM Handle.
3556 * @thread The emulation thread.
3557 */
3558REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3559{
3560 return pVM->rem.s.u32PendingInterrupt;
3561}
3562
3563/**
3564 * Notification about the interrupt FF being set.
3565 *
3566 * @param pVM VM Handle.
3567 * @thread The emulation thread.
3568 */
3569REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3570{
3571 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3572 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3573 if (pVM->rem.s.fInREM)
3574 {
3575 if (VM_IS_EMT(pVM))
3576 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3577 else
3578 ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_HARD);
3579 }
3580}
3581
3582
3583/**
3584 * Notification about the interrupt FF being set.
3585 *
3586 * @param pVM VM Handle.
3587 * @thread The emulation thread.
3588 */
3589REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3590{
3591 LogFlow(("REMR3NotifyInterruptClear:\n"));
3592 VM_ASSERT_EMT(pVM);
3593 if (pVM->rem.s.fInREM)
3594 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3595}
3596
3597
3598/**
3599 * Notification about pending timer(s).
3600 *
3601 * @param pVM VM Handle.
3602 * @thread Any.
3603 */
3604REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3605{
3606#ifndef DEBUG_bird
3607 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3608#endif
3609 if (pVM->rem.s.fInREM)
3610 {
3611 if (VM_IS_EMT(pVM))
3612 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
3613 else
3614 ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_TIMER);
3615 }
3616}
3617
3618
3619/**
3620 * Notification about pending DMA transfers.
3621 *
3622 * @param pVM VM Handle.
3623 * @thread Any.
3624 */
3625REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3626{
3627 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3628 if (pVM->rem.s.fInREM)
3629 {
3630 if (VM_IS_EMT(pVM))
3631 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
3632 else
3633 ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_DMA);
3634 }
3635}
3636
3637
3638/**
3639 * Notification about pending timer(s).
3640 *
3641 * @param pVM VM Handle.
3642 * @thread Any.
3643 */
3644REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3645{
3646 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3647 if (pVM->rem.s.fInREM)
3648 {
3649 if (VM_IS_EMT(pVM))
3650 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
3651 else
3652 ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT);
3653 }
3654}
3655
3656
3657/**
3658 * Notification about pending FF set by an external thread.
3659 *
3660 * @param pVM VM handle.
3661 * @thread Any.
3662 */
3663REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3664{
3665 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3666 if (pVM->rem.s.fInREM)
3667 {
3668 if (VM_IS_EMT(pVM))
3669 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
3670 else
3671 ASMAtomicOrS32(&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT);
3672 }
3673}
3674
3675
3676#ifdef VBOX_WITH_STATISTICS
3677void remR3ProfileStart(int statcode)
3678{
3679 STAMPROFILEADV *pStat;
3680 switch(statcode)
3681 {
3682 case STATS_EMULATE_SINGLE_INSTR:
3683 pStat = &gStatExecuteSingleInstr;
3684 break;
3685 case STATS_QEMU_COMPILATION:
3686 pStat = &gStatCompilationQEmu;
3687 break;
3688 case STATS_QEMU_RUN_EMULATED_CODE:
3689 pStat = &gStatRunCodeQEmu;
3690 break;
3691 case STATS_QEMU_TOTAL:
3692 pStat = &gStatTotalTimeQEmu;
3693 break;
3694 case STATS_QEMU_RUN_TIMERS:
3695 pStat = &gStatTimers;
3696 break;
3697 case STATS_TLB_LOOKUP:
3698 pStat= &gStatTBLookup;
3699 break;
3700 case STATS_IRQ_HANDLING:
3701 pStat= &gStatIRQ;
3702 break;
3703 case STATS_RAW_CHECK:
3704 pStat = &gStatRawCheck;
3705 break;
3706
3707 default:
3708 AssertMsgFailed(("unknown stat %d\n", statcode));
3709 return;
3710 }
3711 STAM_PROFILE_ADV_START(pStat, a);
3712}
3713
3714
3715void remR3ProfileStop(int statcode)
3716{
3717 STAMPROFILEADV *pStat;
3718 switch(statcode)
3719 {
3720 case STATS_EMULATE_SINGLE_INSTR:
3721 pStat = &gStatExecuteSingleInstr;
3722 break;
3723 case STATS_QEMU_COMPILATION:
3724 pStat = &gStatCompilationQEmu;
3725 break;
3726 case STATS_QEMU_RUN_EMULATED_CODE:
3727 pStat = &gStatRunCodeQEmu;
3728 break;
3729 case STATS_QEMU_TOTAL:
3730 pStat = &gStatTotalTimeQEmu;
3731 break;
3732 case STATS_QEMU_RUN_TIMERS:
3733 pStat = &gStatTimers;
3734 break;
3735 case STATS_TLB_LOOKUP:
3736 pStat= &gStatTBLookup;
3737 break;
3738 case STATS_IRQ_HANDLING:
3739 pStat= &gStatIRQ;
3740 break;
3741 case STATS_RAW_CHECK:
3742 pStat = &gStatRawCheck;
3743 break;
3744 default:
3745 AssertMsgFailed(("unknown stat %d\n", statcode));
3746 return;
3747 }
3748 STAM_PROFILE_ADV_STOP(pStat, a);
3749}
3750#endif
3751
3752/**
3753 * Raise an RC, force rem exit.
3754 *
3755 * @param pVM VM handle.
3756 * @param rc The rc.
3757 */
3758void remR3RaiseRC(PVM pVM, int rc)
3759{
3760 Log(("remR3RaiseRC: rc=%Vrc\n", rc));
3761 Assert(pVM->rem.s.fInREM);
3762 VM_ASSERT_EMT(pVM);
3763 pVM->rem.s.rc = rc;
3764 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
3765}
3766
3767
3768/* -+- timers -+- */
3769
/** qemu callback: reads the virtual CPU tick counter (TSC) via TM. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVM);
}
3775
3776
3777/* -+- interrupts -+- */
3778
/** qemu callback: raises the FPU error line, ISA IRQ 13. */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
3784
3785int cpu_get_pic_interrupt(CPUState *env)
3786{
3787 uint8_t u8Interrupt;
3788 int rc;
3789
3790 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
3791 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
3792 * with the (a)pic.
3793 */
3794 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
3795 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
3796 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
3797 * remove this kludge. */
3798 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
3799 {
3800 rc = VINF_SUCCESS;
3801 Assert(env->pVM->rem.s.u32PendingInterrupt >= 0 && env->pVM->rem.s.u32PendingInterrupt <= 255);
3802 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
3803 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
3804 }
3805 else
3806 rc = PDMGetInterrupt(env->pVM, &u8Interrupt);
3807
3808 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Vrc\n", u8Interrupt, rc));
3809 if (VBOX_SUCCESS(rc))
3810 {
3811 if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
3812 env->interrupt_request |= CPU_INTERRUPT_HARD;
3813 return u8Interrupt;
3814 }
3815 return -1;
3816}
3817
3818
3819/* -+- local apic -+- */
3820
/** qemu callback: writes the APIC base MSR via PDM. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Vrc\n", val, rc)); NOREF(rc);
}
3826
3827uint64_t cpu_get_apic_base(CPUX86State *env)
3828{
3829 uint64_t u64;
3830 int rc = PDMApicGetBase(env->pVM, &u64);
3831 if (VBOX_SUCCESS(rc))
3832 {
3833 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
3834 return u64;
3835 }
3836 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Vrc)\n", rc));
3837 return 0;
3838}
3839
/** qemu callback: writes the APIC task priority register via PDM. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVM, val);
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Vrc\n", val, rc)); NOREF(rc);
}
3845
3846uint8_t cpu_get_apic_tpr(CPUX86State *env)
3847{
3848 uint8_t u8;
3849 int rc = PDMApicGetTPR(env->pVM, &u8);
3850 if (VBOX_SUCCESS(rc))
3851 {
3852 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
3853 return u8;
3854 }
3855 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Vrc)\n", rc));
3856 return 0;
3857}
3858
3859
3860/* -+- I/O Ports -+- */
3861
3862#undef LOG_GROUP
3863#define LOG_GROUP LOG_GROUP_REM_IOPORT
3864
/** qemu callback: 8-bit I/O port write, dispatched to IOM.
 * Informational EM status codes are turned into a forced REM exit via
 * remR3RaiseRC; anything else aborts. */
void cpu_outb(CPUState *env, int addr, int val)
{
    /* Suppress logging for the noisy POST/RTC/system-control ports. */
    if (addr != 0x80 && addr != 0x70 && addr != 0x61)
        Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));

    int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outb: addr=%#06x val=%#x -> %Vrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
3881
/** qemu callback: 16-bit I/O port write, dispatched to IOM.
 * Informational EM status codes force a REM exit; anything else aborts. */
void cpu_outw(CPUState *env, int addr, int val)
{
    //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
    int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outw: addr=%#06x val=%#x -> %Vrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
3896
/** qemu callback: 32-bit I/O port write, dispatched to IOM.
 * Informational EM status codes force a REM exit; anything else aborts. */
void cpu_outl(CPUState *env, int addr, int val)
{
    Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
    int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        return;
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_outl: addr=%#06x val=%#x -> %Vrc\n", addr, val, rc));
        remR3RaiseRC(env->pVM, rc);
        return;
    }
    remAbort(rc, __FUNCTION__);
}
3911
/** qemu callback: 8-bit I/O port read, dispatched to IOM.
 * Informational EM status codes force a REM exit and return the partial
 * value; anything else aborts. Returns 0xff on abort (open bus). */
int cpu_inb(CPUState *env, int addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        /* Suppress logging of the noisy CMOS data port. */
        if (/*addr != 0x61 && */addr != 0x71)
            Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
        return (int)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inb: addr=%#06x -> %#x rc=%Vrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (int)u32;
    }
    remAbort(rc, __FUNCTION__);
    return 0xff;
}
3931
/** qemu callback: 16-bit I/O port read, dispatched to IOM.
 * Informational EM status codes force a REM exit and return the partial
 * value; anything else aborts. Returns 0xffff on abort (open bus). */
int cpu_inw(CPUState *env, int addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
        Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
        return (int)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inw: addr=%#06x -> %#x rc=%Vrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (int)u32;
    }
    remAbort(rc, __FUNCTION__);
    return 0xffff;
}
3950
/** qemu callback: 32-bit I/O port read, dispatched to IOM.
 * Informational EM status codes force a REM exit and return the partial
 * value; anything else aborts. Returns 0xffffffff on abort (open bus). */
int cpu_inl(CPUState *env, int addr)
{
    uint32_t u32 = 0;
    int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
    if (RT_LIKELY(rc == VINF_SUCCESS))
    {
//if (addr==0x01f0 && u32 == 0x6b6d)
//      loglevel = ~0;
        Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
        return (int)u32;
    }
    if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
    {
        Log(("cpu_inl: addr=%#06x -> %#x rc=%Vrc\n", addr, u32, rc));
        remR3RaiseRC(env->pVM, rc);
        return (int)u32;
    }
    remAbort(rc, __FUNCTION__);
    return 0xffffffff;
}
3971
3972#undef LOG_GROUP
3973#define LOG_GROUP LOG_GROUP_REM
3974
3975
3976/* -+- helpers and misc other interfaces -+- */
3977
3978/**
3979 * Perform the CPUID instruction.
3980 *
3981 * ASMCpuId cannot be invoked from some source files where this is used because of global
3982 * register allocations.
3983 *
3984 * @param env Pointer to the recompiler CPU structure.
3985 * @param uOperator CPUID operation (eax).
3986 * @param pvEAX Where to store eax.
3987 * @param pvEBX Where to store ebx.
3988 * @param pvECX Where to store ecx.
3989 * @param pvEDX Where to store edx.
3990 */
3991void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
3992{
3993 CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
3994}
3995
3996
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): dead code - compiled out via '#if 0'; kept for reference only.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4026
4027/**
4028 * Interface for the qemu cpu to report unhandled situation
4029 * raising a fatal VM error.
4030 */
4031void cpu_abort(CPUState *env, const char *pszFormat, ...)
4032{
4033 /*
4034 * Bitch about it.
4035 */
4036 RTLogFlags(NULL, "nodisabled nobuffered");
4037 va_list args;
4038 va_start(args, pszFormat);
4039 RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
4040 va_end(args);
4041 va_start(args, pszFormat);
4042 AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
4043 va_end(args);
4044
4045 /*
4046 * If we're in REM context we'll sync back the state before 'jumping' to
4047 * the EMs failure handling.
4048 */
4049 PVM pVM = cpu_single_env->pVM;
4050 if (pVM->rem.s.fInREM)
4051 REMR3StateBack(pVM);
4052 EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
4053 AssertMsgFailed(("EMR3FatalError returned!\n"));
4054}
4055
4056
4057/**
4058 * Aborts the VM.
4059 *
4060 * @param rc VBox error code.
4061 * @param pszTip Hint about why/when this happend.
4062 */
4063static void remAbort(int rc, const char *pszTip)
4064{
4065 /*
4066 * Bitch about it.
4067 */
4068 RTLogPrintf("internal REM fatal error: rc=%Vrc %s\n", rc, pszTip);
4069 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Vrc %s\n", rc, pszTip));
4070
4071 /*
4072 * Jump back to where we entered the recompiler.
4073 */
4074 PVM pVM = cpu_single_env->pVM;
4075 if (pVM->rem.s.fInREM)
4076 REMR3StateBack(pVM);
4077 EMR3FatalError(pVM, rc);
4078 AssertMsgFailed(("EMR3FatalError returned!\n"));
4079}
4080
4081
/**
 * Dumps a linux system call.
 *
 * Logs the syscall number in eax together with its name and the common
 * argument registers. Intended as a debugging aid when tracing a Linux guest.
 *
 * @param pVM VM handle.
 */
void remR3DumpLnxSyscall(PVM pVM)
{
    /* Syscall names for the i386 Linux ABI, indexed by the number in eax. */
    static const char *apsz[] =
    {
        "sys_restart_syscall",  /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",             /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",           /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",            /* 15 */
        "sys_lchown16",
        "sys_ni_syscall",       /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",           /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",            /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",            /* 30 */
        "sys_ni_syscall",       /* old stty syscall holder */
        "sys_ni_syscall",       /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",       /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",            /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",       /* old prof syscall holder */
        "sys_brk",              /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",        /* 50 */
        "sys_acct",
        "sys_umount",           /* recycled never used phys() */
        "sys_ni_syscall",       /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",            /* 55 */
        "sys_ni_syscall",       /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",       /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",            /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",          /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",       /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",        /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",      /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink",         /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",             /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16",         /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",       /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",          /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",        /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",             /* 110 */
        "sys_vhangup",
        "sys_ni_syscall",       /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",          /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",            /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect",         /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",       /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",       /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",            /* 135 */
        "sys_personality",
        "sys_ni_syscall",       /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",           /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",            /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",            /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",      /* 165 */
        "sys_vm86",
        "sys_ni_syscall",       /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",      /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",   /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",          /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",           /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",       /* reserved for streams1 */
        "sys_ni_syscall",       /* reserved for streams2 */
        "sys_vfork",            /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",           /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",           /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",        /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",        /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid",         /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",       /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",       /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",        /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",        /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",      /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",            /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup",         /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",        /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",        /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",    /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",    /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",           /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"        /* sys_vserver */
    };

    /* Known numbers get name + argument registers; unknown ones just the raw eax. */
    uint32_t uEAX = CPUMGetGuestEAX(pVM);
    switch (uEAX)
    {
        default:
            if (uEAX < ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%VGv ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
                     CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
            break;

    }
}
4380
4381
/**
 * Dumps an OpenBSD system call.
 *
 * Logs the syscall number in eax together with its name and up to eight
 * stack-passed arguments read from the guest stack via PGM. Debugging aid
 * when tracing an OpenBSD guest.
 *
 * @param pVM VM handle.
 */
void remR3DumpOBsdSyscall(PVM pVM)
{
    /* Syscall names for the i386 OpenBSD ABI, indexed by the number in eax. */
    static const char *apsz[] =
    {
        "SYS_syscall",          //0
        "SYS_exit",             //1
        "SYS_fork",             //2
        "SYS_read",             //3
        "SYS_write",            //4
        "SYS_open",             //5
        "SYS_close",            //6
        "SYS_wait4",            //7
        "SYS_8",
        "SYS_link",             //9
        "SYS_unlink",           //10
        "SYS_11",
        "SYS_chdir",            //12
        "SYS_fchdir",           //13
        "SYS_mknod",            //14
        "SYS_chmod",            //15
        "SYS_chown",            //16
        "SYS_break",            //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid",           //20
        "SYS_mount",            //21
        "SYS_unmount",          //22
        "SYS_setuid",           //23
        "SYS_getuid",           //24
        "SYS_geteuid",          //25
        "SYS_ptrace",           //26
        "SYS_recvmsg",          //27
        "SYS_sendmsg",          //28
        "SYS_recvfrom",         //29
        "SYS_accept",           //30
        "SYS_getpeername",      //31
        "SYS_getsockname",      //32
        "SYS_access",           //33
        "SYS_chflags",          //34
        "SYS_fchflags",         //35
        "SYS_sync",             //36
        "SYS_kill",             //37
        "SYS_38",
        "SYS_getppid",          //39
        "SYS_40",
        "SYS_dup",              //41
        "SYS_opipe",            //42
        "SYS_getegid",          //43
        "SYS_profil",           //44
        "SYS_ktrace",           //45
        "SYS_sigaction",        //46
        "SYS_getgid",           //47
        "SYS_sigprocmask",      //48
        "SYS_getlogin",         //49
        "SYS_setlogin",         //50
        "SYS_acct",             //51
        "SYS_sigpending",       //52
        "SYS_osigaltstack",     //53
        "SYS_ioctl",            //54
        "SYS_reboot",           //55
        "SYS_revoke",           //56
        "SYS_symlink",          //57
        "SYS_readlink",         //58
        "SYS_execve",           //59
        "SYS_umask",            //60
        "SYS_chroot",           //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork",            //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk",             //69
        "SYS_sstk",             //70
        "SYS_61",
        "SYS_vadvise",          //72
        "SYS_munmap",           //73
        "SYS_mprotect",         //74
        "SYS_madvise",          //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore",          //78
        "SYS_getgroups",        //79
        "SYS_setgroups",        //80
        "SYS_getpgrp",          //81
        "SYS_setpgid",          //82
        "SYS_setitimer",        //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer",        //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2",             //90
        "SYS_91",
        "SYS_fcntl",            //92
        "SYS_select",           //93
        "SYS_94",
        "SYS_fsync",            //95
        "SYS_setpriority",      //96
        "SYS_socket",           //97
        "SYS_connect",          //98
        "SYS_99",
        "SYS_getpriority",      //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn",        //103
        "SYS_bind",             //104
        "SYS_setsockopt",       //105
        "SYS_listen",           //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend",       //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday",     //116
        "SYS_getrusage",        //117
        "SYS_getsockopt",       //118
        "SYS_119",
        "SYS_readv",            //120
        "SYS_writev",           //121
        "SYS_settimeofday",     //122
        "SYS_fchown",           //123
        "SYS_fchmod",           //124
        "SYS_125",
        "SYS_setreuid",         //126
        "SYS_setregid",         //127
        "SYS_rename",           //128
        "SYS_129",
        "SYS_130",
        "SYS_flock",            //131
        "SYS_mkfifo",           //132
        "SYS_sendto",           //133
        "SYS_shutdown",         //134
        "SYS_socketpair",       //135
        "SYS_mkdir",            //136
        "SYS_rmdir",            //137
        "SYS_utimes",           //138
        "SYS_139",
        "SYS_adjtime",          //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid",           //147
        "SYS_quotactl",         //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc",           //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh",            //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch",          //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread",            //173
        "SYS_pwrite",           //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid",           //181
        "SYS_setegid",          //182
        "SYS_seteuid",          //183
        "SYS_lfs_bmapv",        //184
        "SYS_lfs_markv",        //185
        "SYS_lfs_segclean",     //186
        "SYS_lfs_segwait",      //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf",         //191
        "SYS_fpathconf",        //192
        "SYS_swapctl",          //193
        "SYS_getrlimit",        //194
        "SYS_setrlimit",        //195
        "SYS_getdirentries",    //196
        "SYS_mmap",             //197
        "SYS___syscall",        //198
        "SYS_lseek",            //199
        "SYS_truncate",         //200
        "SYS_ftruncate",        //201
        "SYS___sysctl",         //202
        "SYS_mlock",            //203
        "SYS_munlock",          //204
        "SYS_205",
        "SYS_futimes",          //206
        "SYS_getpgid",          //207
        "SYS_xfspioctl",        //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget",           //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget",           //225
        "SYS_msgsnd",           //226
        "SYS_msgrcv",           //227
        "SYS_shmat",            //228
        "SYS_229",
        "SYS_shmdt",            //230
        "SYS_231",
        "SYS_clock_gettime",    //232
        "SYS_clock_settime",    //233
        "SYS_clock_getres",     //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep",        //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit",         //250
        "SYS_rfork",            //251
        "SYS_poll",             //252
        "SYS_issetugid",        //253
        "SYS_lchown",           //254
        "SYS_getsid",           //255
        "SYS_msync",            //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat",        //260
        "SYS_statfs",           //261
        "SYS_fstatfs",          //262
        "SYS_pipe",             //263
        "SYS_fhopen",           //264
        "SYS_265",
        "SYS_fhstatfs",         //266
        "SYS_preadv",           //267
        "SYS_pwritev",          //268
        "SYS_kqueue",           //269
        "SYS_kevent",           //270
        "SYS_mlockall",         //271
        "SYS_munlockall",       //272
        "SYS_getpeereid",       //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid",        //281
        "SYS_setresuid",        //282
        "SYS_getresgid",        //283
        "SYS_setresgid",        //284
        "SYS_285",
        "SYS_mquery",           //286
        "SYS_closefrom",        //287
        "SYS_sigaltstack",      //288
        "SYS_shmget",           //289
        "SYS_semop",            //290
        "SYS_stat",             //291
        "SYS_fstat",            //292
        "SYS_lstat",            //293
        "SYS_fhstat",           //294
        "SYS___semctl",         //295
        "SYS_shmctl",           //296
        "SYS_msgctl",           //297
        "SYS_MAXSYSCALL",       //298
        //299
        //300
    };
    uint32_t uEAX;
    /* Skip the (possibly expensive) guest stack read when logging is off. */
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVM);
    switch (uEAX)
    {
        default:
            if (uEAX < ELEMENTS(apsz))
            {
                /* OpenBSD passes syscall arguments on the stack; fetch the first 8 dwords. */
                uint32_t au32Args[8] = {0};
                PGMPhysReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
            break;
    }
}
4712
4713
4714#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Only compiled for the no-CRT x86 Windows build (see the enclosing #if);
 * nothing needs doing on attach/detach, so simply report success.
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
4722
/**
 * Minimal memcpy replacement for the no-CRT x86 Windows build.
 *
 * Copies @a size bytes from @a src to @a dst; the regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Where to copy to.
 * @param   src     Where to copy from (read-only).
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    /* Fix: the source pointer must be const-qualified; the old code
       initialized a plain uint8_t * from 'const void *src', silently
       discarding the const qualifier (a C constraint violation). */
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
4730
4731#endif
4732
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette