VirtualBox

source: vbox/trunk/src/recompiler_new/VBoxRecompiler.c@ 18595

Last change on this file since 18595 was 18595, checked in by vboxsync, 16 years ago

REM: synced over TLB and TB stats from the old code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 155.6 KB
Line 
1/* $Id: VBoxRecompiler.c 18595 2009-04-01 12:17:30Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
79/** Copy 80-bit fpu register at pSrc to pDst.
80 * This is probably faster than *calling* memcpy.
81 */
82#define REM_COPY_FPU_REG(pDst, pSrc) \
83 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108
109/*******************************************************************************
110* Global Variables *
111*******************************************************************************/
112
113/** @todo Move stats to REM::s some rainy day we have nothing do to. */
114#ifdef VBOX_WITH_STATISTICS
115static STAMPROFILEADV gStatExecuteSingleInstr;
116static STAMPROFILEADV gStatCompilationQEmu;
117static STAMPROFILEADV gStatRunCodeQEmu;
118static STAMPROFILEADV gStatTotalTimeQEmu;
119static STAMPROFILEADV gStatTimers;
120static STAMPROFILEADV gStatTBLookup;
121static STAMPROFILEADV gStatIRQ;
122static STAMPROFILEADV gStatRawCheck;
123static STAMPROFILEADV gStatMemRead;
124static STAMPROFILEADV gStatMemWrite;
125static STAMPROFILE gStatGCPhys2HCVirt;
126static STAMPROFILE gStatHCVirt2GCPhys;
127static STAMCOUNTER gStatCpuGetTSC;
128static STAMCOUNTER gStatRefuseTFInhibit;
129static STAMCOUNTER gStatRefuseVM86;
130static STAMCOUNTER gStatRefusePaging;
131static STAMCOUNTER gStatRefusePAE;
132static STAMCOUNTER gStatRefuseIOPLNot0;
133static STAMCOUNTER gStatRefuseIF0;
134static STAMCOUNTER gStatRefuseCode16;
135static STAMCOUNTER gStatRefuseWP0;
136static STAMCOUNTER gStatRefuseRing1or2;
137static STAMCOUNTER gStatRefuseCanExecute;
138static STAMCOUNTER gStatREMGDTChange;
139static STAMCOUNTER gStatREMIDTChange;
140static STAMCOUNTER gStatREMLDTRChange;
141static STAMCOUNTER gStatREMTRChange;
142static STAMCOUNTER gStatSelOutOfSync[6];
143static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
144static STAMCOUNTER gStatFlushTBs;
145/* in exec.c */
146extern uint32_t tlb_flush_count;
147extern uint32_t tb_flush_count;
148extern uint32_t tb_phys_invalidate_count;
149#endif
150
151/*
152 * Global stuff.
153 */
154
155/** MMIO read callbacks. */
156CPUReadMemoryFunc *g_apfnMMIORead[3] =
157{
158 remR3MMIOReadU8,
159 remR3MMIOReadU16,
160 remR3MMIOReadU32
161};
162
163/** MMIO write callbacks. */
164CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
165{
166 remR3MMIOWriteU8,
167 remR3MMIOWriteU16,
168 remR3MMIOWriteU32
169};
170
171/** Handler read callbacks. */
172CPUReadMemoryFunc *g_apfnHandlerRead[3] =
173{
174 remR3HandlerReadU8,
175 remR3HandlerReadU16,
176 remR3HandlerReadU32
177};
178
179/** Handler write callbacks. */
180CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
181{
182 remR3HandlerWriteU8,
183 remR3HandlerWriteU16,
184 remR3HandlerWriteU32
185};
186
187
188#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
189/*
190 * Debugger commands.
191 */
192static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
193
194/** '.remstep' arguments. */
195static const DBGCVARDESC g_aArgRemStep[] =
196{
197 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
198 { 0, ~0, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
199};
200
201/** Command descriptors. */
202static const DBGCCMD g_aCmds[] =
203{
204 {
205 .pszCmd ="remstep",
206 .cArgsMin = 0,
207 .cArgsMax = 1,
208 .paArgDescs = &g_aArgRemStep[0],
209 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
210 .pResultDesc = NULL,
211 .fFlags = 0,
212 .pfnHandler = remR3CmdDisasEnableStepping,
213 .pszSyntax = "[on/off]",
214 .pszDescription = "Enable or disable the single stepping with logged disassembly. "
215 "If no arguments show the current state."
216 }
217};
218#endif
219
220/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
221uint8_t *code_gen_prologue;
222
223
224/*******************************************************************************
225* Internal Functions *
226*******************************************************************************/
227void remAbort(int rc, const char *pszTip);
228extern int testmath(void);
229
230/* Put them here to avoid unused variable warning. */
231AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
232#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
233//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
234/* Why did this have to be identical?? */
235AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
236#else
237AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
238#endif
239
240
241/**
242 * Initializes the REM.
243 *
244 * @returns VBox status code.
245 * @param pVM The VM to operate on.
246 */
247REMR3DECL(int) REMR3Init(PVM pVM)
248{
249 uint32_t u32Dummy;
250 int rc;
251
252#ifdef VBOX_ENABLE_VBOXREM64
253 LogRel(("Using 64-bit aware REM\n"));
254#endif
255
256 /*
257 * Assert sanity.
258 */
259 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
260 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
261 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
262#if defined(DEBUG) && !defined(RT_OS_SOLARIS) /// @todo fix the solaris math stuff.
263 Assert(!testmath());
264#endif
265
266 /*
267 * Init some internal data members.
268 */
269 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
270 pVM->rem.s.Env.pVM = pVM;
271#ifdef CPU_RAW_MODE_INIT
272 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
273#endif
274
275 /* ctx. */
276 pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVM);
277 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order have changed! REM depends on notification about ALL physical memory registrations\n"));
278
279 /* ignore all notifications */
280 pVM->rem.s.fIgnoreAll = true;
281
282 code_gen_prologue = RTMemExecAlloc(_1K);
283 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
284
285 cpu_exec_init_all(0);
286
287 /*
288 * Init the recompiler.
289 */
290 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
291 {
292 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
293 return VERR_GENERAL_FAILURE;
294 }
295 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
296 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
297
298 /* allocate code buffer for single instruction emulation. */
299 pVM->rem.s.Env.cbCodeBuffer = 4096;
300 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
301 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
302
303 /* finally, set the cpu_single_env global. */
304 cpu_single_env = &pVM->rem.s.Env;
305
306 /* Nothing is pending by default */
307 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
308
309 /*
310 * Register ram types.
311 */
312 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
313 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
314 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
315 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
316 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
317
318 /* stop ignoring. */
319 pVM->rem.s.fIgnoreAll = false;
320
321 /*
322 * Register the saved state data unit.
323 */
324 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
325 NULL, remR3Save, NULL,
326 NULL, remR3Load, NULL);
327 if (RT_FAILURE(rc))
328 return rc;
329
330#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
331 /*
332 * Debugger commands.
333 */
334 static bool fRegisteredCmds = false;
335 if (!fRegisteredCmds)
336 {
337 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
338 if (RT_SUCCESS(rc))
339 fRegisteredCmds = true;
340 }
341#endif
342
343#ifdef VBOX_WITH_STATISTICS
344 /*
345 * Statistics.
346 */
347 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
348 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
349 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
350 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
351 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
352 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
353 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
354 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
355 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
356 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
357 STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
358 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
359
360 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
361
362 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
363 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
364 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
365 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
366 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
367 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
368 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
369 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
370 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
371 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
372 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
373
374 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
375 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
376 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
377 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
378
379 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
380 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
381 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
382 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
383 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
384 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
385
386 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
387 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
388 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
389 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
390 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
391 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
392#endif /* VBOX_WITH_STATISTICS */
393
394 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
395 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
396 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
397
398
399#ifdef DEBUG_ALL_LOGGING
400 loglevel = ~0;
401# ifdef DEBUG_TMP_LOGGING
402 logfile = fopen("/tmp/vbox-qemu.log", "w");
403# endif
404#endif
405
406 return rc;
407}
408
409
410/**
411 * Finalizes the REM initialization.
412 *
413 * This is called after all components, devices and drivers has
414 * been initialized. Its main purpose it to finish the RAM related
415 * initialization.
416 *
417 * @returns VBox status code.
418 *
419 * @param pVM The VM handle.
420 */
421REMR3DECL(int) REMR3InitFinalize(PVM pVM)
422{
423 int rc;
424
425 /*
426 * Ram size & dirty bit map.
427 */
428 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
429 pVM->rem.s.fGCPhysLastRamFixed = true;
430#ifdef RT_STRICT
431 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
432#else
433 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
434#endif
435 return rc;
436}
437
438
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map (place an inaccessible page
 *                      range after it to catch overruns).
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* The dirty map covers everything up to and including GCPhysLastRam;
       the +1 turns the last address into a byte count. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    /* Overflow check: cb wraps to 0 if GCPhysLastRam was the max value. */
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    /* One dirty byte per page. */
    phys_ram_dirty_size = cb >> PAGE_SHIFT;
    /* Verify the shift lost no bits, i.e. cb was page aligned. */
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        /* Plain heap allocation, no guard. */
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         * The pages between cbBitmapAligned and cbBitmapFull are made inaccessible
         * so that a write past the bitmap faults immediately.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;      /* ensure there's always guard room */
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;                   /* ensure at least 64KB of guard */

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Revoke all access to the guard region. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the pointer up so the bitmap ends exactly at the guard page
           (NOTE: the base of the allocation is no longer what phys_ram_dirty
           points to; freeing would require undoing this adjustment). */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it: all pages marked dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
493
494
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
    /* Currently nothing to clean up; REM resources live for the process
       lifetime (code buffers, dirty map, etc. are not freed here). */
    return VINF_SUCCESS;
}
508
509
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.
     * Note: notifications are suppressed around cpu_reset() so the reset
     * doesn't trigger spurious handler callbacks; order matters here.
     */
    pVM->rem.s.fIgnoreAll = true;
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    pVM->rem.s.fIgnoreAll = false;

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
534
535
536/**
537 * Execute state save operation.
538 *
539 * @returns VBox status code.
540 * @param pVM VM Handle.
541 * @param pSSM SSM operation handle.
542 */
543static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
544{
545 PREM pRem = &pVM->rem.s;
546
547 /*
548 * Save the required CPU Env bits.
549 * (Not much because we're never in REM when doing the save.)
550 */
551 LogFlow(("remR3Save:\n"));
552 Assert(!pRem->fInREM);
553 SSMR3PutU32(pSSM, pRem->Env.hflags);
554 SSMR3PutU32(pSSM, ~0); /* separator */
555
556 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
557 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
558 SSMR3PutUInt(pSSM, pVM->rem.s.u32PendingInterrupt);
559
560 return SSMR3PutU32(pSSM, ~0); /* terminator */
561}
562
563
/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   u32Version      Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;         /* read via SSMR3GetUInt, hence uint32_t not bool */
    uint32_t u32Sep;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    pVM->rem.s.fIgnoreAll = true;

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);            /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        unsigned i;

        /*
         * Load the REM stuff.
         */
        rc = SSMR3GetUInt(pSSM, &pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        /* Guard against corrupt/hostile saved state overrunning the array. */
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Flush the whole TLB so the loaded state isn't mixed with stale mappings.
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    pVM->rem.s.fIgnoreAll = false;

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    CPUMSetChangedFlags(pVM, CPUM_CHANGED_ALL);
    return VINF_SUCCESS;
}
685
686
687
688#undef LOG_GROUP
689#define LOG_GROUP LOG_GROUP_REM_RUN
690
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.
     * (interrupt_request is saved so it can be restored at the bottom.)
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * fBp is true iff cpu_breakpoint_remove() succeeded, i.e. a breakpoint was armed at PC.
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* Single step completed: nudge the TSC and virtual clock forward. */
        TMCpuTickResume(pVM);
        TMCpuTickPause(pVM);
        TMVirtualResume(pVM);
        TMVirtualPause(pVM);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the recompiler exit codes to VBox status codes. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* An EM status was raised from inside the recompiler; fetch and consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        /* Re-arm the breakpoint we removed above. */
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
776
777
778/**
779 * Set a breakpoint using the REM facilities.
780 *
781 * @returns VBox status code.
782 * @param pVM The VM handle.
783 * @param Address The breakpoint address.
784 * @thread The emulation thread.
785 */
786REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
787{
788 VM_ASSERT_EMT(pVM);
789 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
790 {
791 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
792 return VINF_SUCCESS;
793 }
794 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
795 return VERR_REM_NO_MORE_BP_SLOTS;
796}
797
798
799/**
800 * Clears a breakpoint set by REMR3BreakpointSet().
801 *
802 * @returns VBox status code.
803 * @param pVM The VM handle.
804 * @param Address The breakpoint address.
805 * @thread The emulation thread.
806 */
807REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
808{
809 VM_ASSERT_EMT(pVM);
810 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
811 {
812 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
813 return VINF_SUCCESS;
814 }
815 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
816 return VERR_REM_BP_NOT_FOUND;
817}
818
819
820/**
821 * Emulate an instruction.
822 *
823 * This function executes one instruction without letting anyone
824 * interrupt it. This is intended for being called while being in
825 * raw mode and thus will take care of all the state syncing between
826 * REM and the rest.
827 *
828 * @returns VBox status code.
829 * @param pVM VM handle.
830 */
831REMR3DECL(int) REMR3EmulateInstruction(PVM pVM)
832{
833 bool fFlushTBs;
834
835 int rc, rc2;
836 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
837
838 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
839 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
840 */
841 if (HWACCMIsEnabled(pVM))
842 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
843
844 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
845 fFlushTBs = pVM->rem.s.fFlushTBs;
846 pVM->rem.s.fFlushTBs = false;
847
848 /*
849 * Sync the state and enable single instruction / single stepping.
850 */
851 rc = REMR3State(pVM);
852 pVM->rem.s.fFlushTBs = fFlushTBs;
853 if (RT_SUCCESS(rc))
854 {
855 int interrupt_request = pVM->rem.s.Env.interrupt_request;
856 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
857 Assert(!pVM->rem.s.Env.singlestep_enabled);
858 /*
859 * Now we set the execute single instruction flag and enter the cpu_exec loop.
860 */
861 TMNotifyStartOfExecution(pVM);
862 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
863 rc = cpu_exec(&pVM->rem.s.Env);
864 TMNotifyEndOfExecution(pVM);
865 switch (rc)
866 {
867 /*
868 * Executed without anything out of the way happening.
869 */
870 case EXCP_SINGLE_INSTR:
871 rc = VINF_EM_RESCHEDULE;
872 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
873 break;
874
875 /*
876 * If we take a trap or start servicing a pending interrupt, we might end up here.
877 * (Timer thread or some other thread wishing EMT's attention.)
878 */
879 case EXCP_INTERRUPT:
880 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
881 rc = VINF_EM_RESCHEDULE;
882 break;
883
884 /*
885 * Single step, we assume!
886 * If there was a breakpoint there we're fucked now.
887 */
888 case EXCP_DEBUG:
889 {
890 /* breakpoint or single step? */
891 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
892 int iBP;
893 rc = VINF_EM_DBG_STEPPED;
894 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
895 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
896 {
897 rc = VINF_EM_DBG_BREAKPOINT;
898 break;
899 }
900 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
901 break;
902 }
903
904 /*
905 * hlt instruction.
906 */
907 case EXCP_HLT:
908 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
909 rc = VINF_EM_HALT;
910 break;
911
912 /*
913 * The VM has halted.
914 */
915 case EXCP_HALTED:
916 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
917 rc = VINF_EM_HALT;
918 break;
919
920 /*
921 * Switch to RAW-mode.
922 */
923 case EXCP_EXECUTE_RAW:
924 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
925 rc = VINF_EM_RESCHEDULE_RAW;
926 break;
927
928 /*
929 * Switch to hardware accelerated RAW-mode.
930 */
931 case EXCP_EXECUTE_HWACC:
932 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
933 rc = VINF_EM_RESCHEDULE_HWACC;
934 break;
935
936 /*
937 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
938 */
939 case EXCP_RC:
940 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
941 rc = pVM->rem.s.rc;
942 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
943 break;
944
945 /*
946 * Figure out the rest when they arrive....
947 */
948 default:
949 AssertMsgFailed(("rc=%d\n", rc));
950 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
951 rc = VINF_EM_RESCHEDULE;
952 break;
953 }
954
955 /*
956 * Switch back the state.
957 */
958 pVM->rem.s.Env.interrupt_request = interrupt_request;
959 rc2 = REMR3StateBack(pVM);
960 AssertRC(rc2);
961 }
962
963 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
964 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
965 return rc;
966}
967
968
969/**
970 * Runs code in recompiled mode.
971 *
972 * Before calling this function the REM state needs to be in sync with
973 * the VM. Call REMR3State() to perform the sync. It's only necessary
974 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
975 * and after calling REMR3StateBack().
976 *
977 * @returns VBox status code.
978 *
979 * @param pVM VM Handle.
980 */
981REMR3DECL(int) REMR3Run(PVM pVM)
982{
983 int rc;
984 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
985 Assert(pVM->rem.s.fInREM);
986
987 TMNotifyStartOfExecution(pVM);
988 rc = cpu_exec(&pVM->rem.s.Env);
989 TMNotifyEndOfExecution(pVM);
990 switch (rc)
991 {
992 /*
993 * This happens when the execution was interrupted
994 * by an external event, like pending timers.
995 */
996 case EXCP_INTERRUPT:
997 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
998 rc = VINF_SUCCESS;
999 break;
1000
1001 /*
1002 * hlt instruction.
1003 */
1004 case EXCP_HLT:
1005 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1006 rc = VINF_EM_HALT;
1007 break;
1008
1009 /*
1010 * The VM has halted.
1011 */
1012 case EXCP_HALTED:
1013 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1014 rc = VINF_EM_HALT;
1015 break;
1016
1017 /*
1018 * Breakpoint/single step.
1019 */
1020 case EXCP_DEBUG:
1021 {
1022#if 0//def DEBUG_bird
1023 static int iBP = 0;
1024 printf("howdy, breakpoint! iBP=%d\n", iBP);
1025 switch (iBP)
1026 {
1027 case 0:
1028 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1029 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1030 //pVM->rem.s.Env.interrupt_request = 0;
1031 //pVM->rem.s.Env.exception_index = -1;
1032 //g_fInterruptDisabled = 1;
1033 rc = VINF_SUCCESS;
1034 asm("int3");
1035 break;
1036 default:
1037 asm("int3");
1038 break;
1039 }
1040 iBP++;
1041#else
1042 /* breakpoint or single step? */
1043 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1044 int iBP;
1045 rc = VINF_EM_DBG_STEPPED;
1046 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1047 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1048 {
1049 rc = VINF_EM_DBG_BREAKPOINT;
1050 break;
1051 }
1052 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1053#endif
1054 break;
1055 }
1056
1057 /*
1058 * Switch to RAW-mode.
1059 */
1060 case EXCP_EXECUTE_RAW:
1061 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1062 rc = VINF_EM_RESCHEDULE_RAW;
1063 break;
1064
1065 /*
1066 * Switch to hardware accelerated RAW-mode.
1067 */
1068 case EXCP_EXECUTE_HWACC:
1069 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1070 rc = VINF_EM_RESCHEDULE_HWACC;
1071 break;
1072
1073 /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
1074 /*
1075 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1076 */
1077 case EXCP_RC:
1078 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1079 rc = pVM->rem.s.rc;
1080 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1081 break;
1082
1083 /*
1084 * Figure out the rest when they arrive....
1085 */
1086 default:
1087 AssertMsgFailed(("rc=%d\n", rc));
1088 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1089 rc = VINF_SUCCESS;
1090 break;
1091 }
1092
1093 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1094 return rc;
1095}
1096
1097
1098/**
1099 * Check if the cpu state is suitable for Raw execution.
1100 *
1101 * @returns boolean
1102 * @param env The CPU env struct.
1103 * @param eip The EIP to check this for (might differ from env->eip).
1104 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1105 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1106 *
1107 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1108 */
1109bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1110{
1111 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1112 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1113 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1114 uint32_t u32CR0;
1115
1116 /* Update counter. */
1117 env->pVM->rem.s.cCanExecuteRaw++;
1118
1119 if (HWACCMIsEnabled(env->pVM))
1120 {
1121 CPUMCTX Ctx;
1122
1123 env->state |= CPU_RAW_HWACC;
1124
1125 /*
1126 * Create partial context for HWACCMR3CanExecuteGuest
1127 */
1128 Ctx.cr0 = env->cr[0];
1129 Ctx.cr3 = env->cr[3];
1130 Ctx.cr4 = env->cr[4];
1131
1132 Ctx.tr = env->tr.selector;
1133 Ctx.trHid.u64Base = env->tr.base;
1134 Ctx.trHid.u32Limit = env->tr.limit;
1135 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1136
1137 Ctx.idtr.cbIdt = env->idt.limit;
1138 Ctx.idtr.pIdt = env->idt.base;
1139
1140 Ctx.gdtr.cbGdt = env->gdt.limit;
1141 Ctx.gdtr.pGdt = env->gdt.base;
1142
1143 Ctx.rsp = env->regs[R_ESP];
1144 Ctx.rip = env->eip;
1145
1146 Ctx.eflags.u32 = env->eflags;
1147
1148 Ctx.cs = env->segs[R_CS].selector;
1149 Ctx.csHid.u64Base = env->segs[R_CS].base;
1150 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1151 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1152
1153 Ctx.ds = env->segs[R_DS].selector;
1154 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1155 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1156 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1157
1158 Ctx.es = env->segs[R_ES].selector;
1159 Ctx.esHid.u64Base = env->segs[R_ES].base;
1160 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1161 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1162
1163 Ctx.fs = env->segs[R_FS].selector;
1164 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1165 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1166 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1167
1168 Ctx.gs = env->segs[R_GS].selector;
1169 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1170 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1171 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1172
1173 Ctx.ss = env->segs[R_SS].selector;
1174 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1175 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1176 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1177
1178 Ctx.msrEFER = env->efer;
1179
1180 /* Hardware accelerated raw-mode:
1181 *
1182 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1183 */
1184 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1185 {
1186 *piException = EXCP_EXECUTE_HWACC;
1187 return true;
1188 }
1189 return false;
1190 }
1191
1192 /*
1193 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1194 * or 32 bits protected mode ring 0 code
1195 *
1196 * The tests are ordered by the likelyhood of being true during normal execution.
1197 */
1198 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1199 {
1200 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1201 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1202 return false;
1203 }
1204
1205#ifndef VBOX_RAW_V86
1206 if (fFlags & VM_MASK) {
1207 STAM_COUNTER_INC(&gStatRefuseVM86);
1208 Log2(("raw mode refused: VM_MASK\n"));
1209 return false;
1210 }
1211#endif
1212
1213 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1214 {
1215#ifndef DEBUG_bird
1216 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1217#endif
1218 return false;
1219 }
1220
1221 if (env->singlestep_enabled)
1222 {
1223 //Log2(("raw mode refused: Single step\n"));
1224 return false;
1225 }
1226
1227 if (env->nb_breakpoints > 0)
1228 {
1229 //Log2(("raw mode refused: Breakpoints\n"));
1230 return false;
1231 }
1232
1233 u32CR0 = env->cr[0];
1234 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1235 {
1236 STAM_COUNTER_INC(&gStatRefusePaging);
1237 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1238 return false;
1239 }
1240
1241 if (env->cr[4] & CR4_PAE_MASK)
1242 {
1243 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1244 {
1245 STAM_COUNTER_INC(&gStatRefusePAE);
1246 return false;
1247 }
1248 }
1249
1250 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1251 {
1252 if (!EMIsRawRing3Enabled(env->pVM))
1253 return false;
1254
1255 if (!(env->eflags & IF_MASK))
1256 {
1257 STAM_COUNTER_INC(&gStatRefuseIF0);
1258 Log2(("raw mode refused: IF (RawR3)\n"));
1259 return false;
1260 }
1261
1262 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1263 {
1264 STAM_COUNTER_INC(&gStatRefuseWP0);
1265 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1266 return false;
1267 }
1268 }
1269 else
1270 {
1271 if (!EMIsRawRing0Enabled(env->pVM))
1272 return false;
1273
1274 // Let's start with pure 32 bits ring 0 code first
1275 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1276 {
1277 STAM_COUNTER_INC(&gStatRefuseCode16);
1278 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1279 return false;
1280 }
1281
1282 // Only R0
1283 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1284 {
1285 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1286 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1287 return false;
1288 }
1289
1290 if (!(u32CR0 & CR0_WP_MASK))
1291 {
1292 STAM_COUNTER_INC(&gStatRefuseWP0);
1293 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1294 return false;
1295 }
1296
1297 if (PATMIsPatchGCAddr(env->pVM, eip))
1298 {
1299 Log2(("raw r0 mode forced: patch code\n"));
1300 *piException = EXCP_EXECUTE_RAW;
1301 return true;
1302 }
1303
1304#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1305 if (!(env->eflags & IF_MASK))
1306 {
1307 STAM_COUNTER_INC(&gStatRefuseIF0);
1308 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1309 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1310 return false;
1311 }
1312#endif
1313
1314 env->state |= CPU_RAW_RING0;
1315 }
1316
1317 /*
1318 * Don't reschedule the first time we're called, because there might be
1319 * special reasons why we're here that is not covered by the above checks.
1320 */
1321 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1322 {
1323 Log2(("raw mode refused: first scheduling\n"));
1324 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1325 return false;
1326 }
1327
1328 Assert(PGMPhysIsA20Enabled(env->pVM));
1329 *piException = EXCP_EXECUTE_RAW;
1330 return true;
1331}
1332
1333
1334/**
1335 * Fetches a code byte.
1336 *
1337 * @returns Success indicator (bool) for ease of use.
1338 * @param env The CPU environment structure.
1339 * @param GCPtrInstr Where to fetch code.
1340 * @param pu8Byte Where to store the byte on success
1341 */
1342bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1343{
1344 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1345 if (RT_SUCCESS(rc))
1346 return true;
1347 return false;
1348}
1349
1350
1351/**
1352 * Flush (or invalidate if you like) page table/dir entry.
1353 *
1354 * (invlpg instruction; tlb_flush_page)
1355 *
1356 * @param env Pointer to cpu environment.
1357 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1358 */
1359void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1360{
1361 PVM pVM = env->pVM;
1362 PCPUMCTX pCtx;
1363 int rc;
1364
1365 /*
1366 * When we're replaying invlpg instructions or restoring a saved
1367 * state we disable this path.
1368 */
1369 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.fIgnoreAll)
1370 return;
1371 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1372 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1373
1374 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1375
1376 /*
1377 * Update the control registers before calling PGMFlushPage.
1378 */
1379 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1380 pCtx->cr0 = env->cr[0];
1381 pCtx->cr3 = env->cr[3];
1382 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1383 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1384 pCtx->cr4 = env->cr[4];
1385
1386 /*
1387 * Let PGM do the rest.
1388 */
1389 rc = PGMInvalidatePage(pVM, GCPtr);
1390 if (RT_FAILURE(rc))
1391 {
1392 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1393 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1394 }
1395 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1396}
1397
1398
1399#ifndef REM_PHYS_ADDR_IN_TLB
1400/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
1401void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
1402{
1403 void *pv;
1404 int rc;
1405
1406 /* Address must be aligned enough to fiddle with lower bits */
1407 Assert((physAddr & 0x3) == 0);
1408
1409 rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
1410 Assert( rc == VINF_SUCCESS
1411 || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
1412 || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
1413 || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
1414 if (RT_FAILURE(rc))
1415 return (void *)1;
1416 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
1417 return (void *)((uintptr_t)pv | 2);
1418 return pv;
1419}
1420#endif /* REM_PHYS_ADDR_IN_TLB */
1421
1422
1423/**
1424 * Called from tlb_protect_code in order to write monitor a code page.
1425 *
1426 * @param env Pointer to the CPU environment.
1427 * @param GCPtr Code page to monitor
1428 */
1429void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1430{
1431#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1432 Assert(env->pVM->rem.s.fInREM);
1433 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1434 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1435 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1436 && !(env->eflags & VM_MASK) /* no V86 mode */
1437 && !HWACCMIsEnabled(env->pVM))
1438 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1439#endif
1440}
1441
1442
1443/**
1444 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1445 *
1446 * @param env Pointer to the CPU environment.
1447 * @param GCPtr Code page to monitor
1448 */
1449void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1450{
1451 Assert(env->pVM->rem.s.fInREM);
1452#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1453 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1454 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1455 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1456 && !(env->eflags & VM_MASK) /* no V86 mode */
1457 && !HWACCMIsEnabled(env->pVM))
1458 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1459#endif
1460}
1461
1462
1463/**
1464 * Called when the CPU is initialized, any of the CRx registers are changed or
1465 * when the A20 line is modified.
1466 *
1467 * @param env Pointer to the CPU environment.
1468 * @param fGlobal Set if the flush is global.
1469 */
1470void remR3FlushTLB(CPUState *env, bool fGlobal)
1471{
1472 PVM pVM = env->pVM;
1473 PCPUMCTX pCtx;
1474
1475 /*
1476 * When we're replaying invlpg instructions or restoring a saved
1477 * state we disable this path.
1478 */
1479 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.fIgnoreAll)
1480 return;
1481 Assert(pVM->rem.s.fInREM);
1482
1483 /*
1484 * The caller doesn't check cr4, so we have to do that for ourselves.
1485 */
1486 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1487 fGlobal = true;
1488 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1489
1490 /*
1491 * Update the control registers before calling PGMR3FlushTLB.
1492 */
1493 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1494 pCtx->cr0 = env->cr[0];
1495 pCtx->cr3 = env->cr[3];
1496 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1497 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1498 pCtx->cr4 = env->cr[4];
1499
1500 /*
1501 * Let PGM do the rest.
1502 */
1503 PGMFlushTLB(pVM, env->cr[3], fGlobal);
1504}
1505
1506
1507/**
1508 * Called when any of the cr0, cr4 or efer registers is updated.
1509 *
1510 * @param env Pointer to the CPU environment.
1511 */
1512void remR3ChangeCpuMode(CPUState *env)
1513{
1514 int rc;
1515 PVM pVM = env->pVM;
1516 PCPUMCTX pCtx;
1517
1518 /*
1519 * When we're replaying loads or restoring a saved
1520 * state this path is disabled.
1521 */
1522 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.fIgnoreAll)
1523 return;
1524 Assert(pVM->rem.s.fInREM);
1525
1526 /*
1527 * Update the control registers before calling PGMChangeMode()
1528 * as it may need to map whatever cr3 is pointing to.
1529 */
1530 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1531 pCtx->cr0 = env->cr[0];
1532 pCtx->cr3 = env->cr[3];
1533 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1534 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
1535 pCtx->cr4 = env->cr[4];
1536
1537#ifdef TARGET_X86_64
1538 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], env->efer);
1539 if (rc != VINF_SUCCESS)
1540 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], env->efer, rc);
1541#else
1542 rc = PGMChangeMode(pVM, env->cr[0], env->cr[4], 0);
1543 if (rc != VINF_SUCCESS)
1544 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], 0LL, rc);
1545#endif
1546}
1547
1548
1549/**
1550 * Called from compiled code to run dma.
1551 *
1552 * @param env Pointer to the CPU environment.
1553 */
1554void remR3DmaRun(CPUState *env)
1555{
1556 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1557 PDMR3DmaRun(env->pVM);
1558 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1559}
1560
1561
1562/**
1563 * Called from compiled code to schedule pending timers in VMM
1564 *
1565 * @param env Pointer to the CPU environment.
1566 */
1567void remR3TimersRun(CPUState *env)
1568{
1569 LogFlow(("remR3TimersRun:\n"));
1570 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1571 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1572 TMR3TimerQueuesDo(env->pVM);
1573 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1574 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1575}
1576
1577
1578/**
1579 * Record trap occurance
1580 *
1581 * @returns VBox status code
1582 * @param env Pointer to the CPU environment.
1583 * @param uTrap Trap nr
1584 * @param uErrorCode Error code
1585 * @param pvNextEIP Next EIP
1586 */
1587int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1588{
1589 PVM pVM = env->pVM;
1590#ifdef VBOX_WITH_STATISTICS
1591 static STAMCOUNTER s_aStatTrap[255];
1592 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1593#endif
1594
1595#ifdef VBOX_WITH_STATISTICS
1596 if (uTrap < 255)
1597 {
1598 if (!s_aRegisters[uTrap])
1599 {
1600 char szStatName[64];
1601 s_aRegisters[uTrap] = true;
1602 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1603 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1604 }
1605 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1606 }
1607#endif
1608 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1609 if( uTrap < 0x20
1610 && (env->cr[0] & X86_CR0_PE)
1611 && !(env->eflags & X86_EFL_VM))
1612 {
1613#ifdef DEBUG
1614 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1615#endif
1616 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1617 {
1618 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1619 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1620 return VERR_REM_TOO_MANY_TRAPS;
1621 }
1622 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1623 pVM->rem.s.cPendingExceptions = 1;
1624 pVM->rem.s.uPendingException = uTrap;
1625 pVM->rem.s.uPendingExcptEIP = env->eip;
1626 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1627 }
1628 else
1629 {
1630 pVM->rem.s.cPendingExceptions = 0;
1631 pVM->rem.s.uPendingException = uTrap;
1632 pVM->rem.s.uPendingExcptEIP = env->eip;
1633 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1634 }
1635 return VINF_SUCCESS;
1636}
1637
1638
1639/*
1640 * Clear current active trap
1641 *
1642 * @param pVM VM Handle.
1643 */
1644void remR3TrapClear(PVM pVM)
1645{
1646 pVM->rem.s.cPendingExceptions = 0;
1647 pVM->rem.s.uPendingException = 0;
1648 pVM->rem.s.uPendingExcptEIP = 0;
1649 pVM->rem.s.uPendingExcptCR2 = 0;
1650}
1651
1652
1653/*
1654 * Record previous call instruction addresses
1655 *
1656 * @param env Pointer to the CPU environment.
1657 */
1658void remR3RecordCall(CPUState *env)
1659{
1660 CSAMR3RecordCallAddress(env->pVM, env->eip);
1661}
1662
1663
1664/**
1665 * Syncs the internal REM state with the VM.
1666 *
1667 * This must be called before REMR3Run() is invoked whenever when the REM
1668 * state is not up to date. Calling it several times in a row is not
1669 * permitted.
1670 *
1671 * @returns VBox status code.
1672 *
1673 * @param pVM VM Handle.
1674 *
 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
 *         not do this since the majority of the callers don't want any unnecessary events
 *         pending that would immediately interrupt execution.
1678 */
1679REMR3DECL(int) REMR3State(PVM pVM)
1680{
1681 register const CPUMCTX *pCtx;
1682 register unsigned fFlags;
1683 bool fHiddenSelRegsValid;
1684 unsigned i;
1685 TRPMEVENT enmType;
1686 uint8_t u8TrapNo;
1687 int rc;
1688
1689 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1690 Log2(("REMR3State:\n"));
1691
1692 pCtx = pVM->rem.s.pCtx;
1693 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1694
1695 Assert(!pVM->rem.s.fInREM);
1696 pVM->rem.s.fInStateSync = true;
1697
1698 /*
1699 * If we have to flush TBs, do that immediately.
1700 */
1701 if (pVM->rem.s.fFlushTBs)
1702 {
1703 STAM_COUNTER_INC(&gStatFlushTBs);
1704 tb_flush(&pVM->rem.s.Env);
1705 pVM->rem.s.fFlushTBs = false;
1706 }
1707
1708 /*
1709 * Copy the registers which require no special handling.
1710 */
1711#ifdef TARGET_X86_64
1712 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1713 Assert(R_EAX == 0);
1714 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1715 Assert(R_ECX == 1);
1716 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1717 Assert(R_EDX == 2);
1718 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1719 Assert(R_EBX == 3);
1720 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1721 Assert(R_ESP == 4);
1722 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1723 Assert(R_EBP == 5);
1724 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1725 Assert(R_ESI == 6);
1726 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1727 Assert(R_EDI == 7);
1728 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1729 pVM->rem.s.Env.regs[8] = pCtx->r8;
1730 pVM->rem.s.Env.regs[9] = pCtx->r9;
1731 pVM->rem.s.Env.regs[10] = pCtx->r10;
1732 pVM->rem.s.Env.regs[11] = pCtx->r11;
1733 pVM->rem.s.Env.regs[12] = pCtx->r12;
1734 pVM->rem.s.Env.regs[13] = pCtx->r13;
1735 pVM->rem.s.Env.regs[14] = pCtx->r14;
1736 pVM->rem.s.Env.regs[15] = pCtx->r15;
1737
1738 pVM->rem.s.Env.eip = pCtx->rip;
1739
1740 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1741#else
1742 Assert(R_EAX == 0);
1743 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1744 Assert(R_ECX == 1);
1745 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1746 Assert(R_EDX == 2);
1747 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1748 Assert(R_EBX == 3);
1749 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1750 Assert(R_ESP == 4);
1751 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1752 Assert(R_EBP == 5);
1753 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1754 Assert(R_ESI == 6);
1755 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1756 Assert(R_EDI == 7);
1757 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1758 pVM->rem.s.Env.eip = pCtx->eip;
1759
1760 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1761#endif
1762
1763 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1764
1765 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1766 for (i=0;i<8;i++)
1767 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1768
1769 /*
1770 * Clear the halted hidden flag (the interrupt waking up the CPU can
1771 * have been dispatched in raw mode).
1772 */
1773 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1774
1775 /*
1776 * Replay invlpg?
1777 */
1778 if (pVM->rem.s.cInvalidatedPages)
1779 {
1780 RTUINT i;
1781
1782 pVM->rem.s.fIgnoreInvlPg = true;
1783 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1784 {
1785 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1786 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1787 }
1788 pVM->rem.s.fIgnoreInvlPg = false;
1789 pVM->rem.s.cInvalidatedPages = 0;
1790 }
1791
1792 /* Replay notification changes? */
1793 if (pVM->rem.s.cHandlerNotifications)
1794 REMR3ReplayHandlerNotifications(pVM);
1795
1796 /* Update MSRs; before CRx registers! */
1797 pVM->rem.s.Env.efer = pCtx->msrEFER;
1798 pVM->rem.s.Env.star = pCtx->msrSTAR;
1799 pVM->rem.s.Env.pat = pCtx->msrPAT;
1800#ifdef TARGET_X86_64
1801 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1802 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1803 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1804 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1805
1806 /* Update the internal long mode activate flag according to the new EFER value. */
1807 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1808 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1809 else
1810 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1811#endif
1812
1813 /*
1814 * Registers which are rarely changed and require special handling / order when changed.
1815 */
1816 fFlags = CPUMGetAndClearChangedFlagsREM(pVM);
1817 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1818 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1819 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1820 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1821 {
1822 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1823 {
1824 pVM->rem.s.fIgnoreCR3Load = true;
1825 tlb_flush(&pVM->rem.s.Env, true);
1826 pVM->rem.s.fIgnoreCR3Load = false;
1827 }
1828
1829 /* CR4 before CR0! */
1830 if (fFlags & CPUM_CHANGED_CR4)
1831 {
1832 pVM->rem.s.fIgnoreCR3Load = true;
1833 pVM->rem.s.fIgnoreCpuMode = true;
1834 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1835 pVM->rem.s.fIgnoreCpuMode = false;
1836 pVM->rem.s.fIgnoreCR3Load = false;
1837 }
1838
1839 if (fFlags & CPUM_CHANGED_CR0)
1840 {
1841 pVM->rem.s.fIgnoreCR3Load = true;
1842 pVM->rem.s.fIgnoreCpuMode = true;
1843 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1844 pVM->rem.s.fIgnoreCpuMode = false;
1845 pVM->rem.s.fIgnoreCR3Load = false;
1846 }
1847
1848 if (fFlags & CPUM_CHANGED_CR3)
1849 {
1850 pVM->rem.s.fIgnoreCR3Load = true;
1851 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1852 pVM->rem.s.fIgnoreCR3Load = false;
1853 }
1854
1855 if (fFlags & CPUM_CHANGED_GDTR)
1856 {
1857 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1858 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1859 }
1860
1861 if (fFlags & CPUM_CHANGED_IDTR)
1862 {
1863 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1864 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1865 }
1866
1867 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1868 {
1869 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1870 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1871 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1872 }
1873
1874 if (fFlags & CPUM_CHANGED_LDTR)
1875 {
1876 if (fHiddenSelRegsValid)
1877 {
1878 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1879 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1880 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1881 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1882 }
1883 else
1884 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
1885 }
1886
1887 if (fFlags & CPUM_CHANGED_CPUID)
1888 {
1889 uint32_t u32Dummy;
1890
1891 /*
1892 * Get the CPUID features.
1893 */
1894 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
1895 CPUMGetGuestCpuId(pVM, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
1896 }
1897
1898 /* Sync FPU state after CR4, CPUID and EFER (!). */
1899 if (fFlags & CPUM_CHANGED_FPU_REM)
1900 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
1901 }
1902
1903 /*
1904 * Sync TR unconditionally to make life simpler.
1905 */
1906 pVM->rem.s.Env.tr.selector = pCtx->tr;
1907 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
1908 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
1909 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
1910 /* Note! do_interrupt will fault if the busy flag is still set... */
1911 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
1912
1913 /*
1914 * Update selector registers.
1915 * This must be done *after* we've synced gdt, ldt and crX registers
1916 * since we're reading the GDT/LDT om sync_seg. This will happen with
1917 * saved state which takes a quick dip into rawmode for instance.
1918 */
1919 /*
1920 * Stack; Note first check this one as the CPL might have changed. The
1921 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
1922 */
1923
1924 if (fHiddenSelRegsValid)
1925 {
1926 /* The hidden selector registers are valid in the CPU context. */
1927 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
1928
1929 /* Set current CPL */
1930 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1931
1932 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
1933 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
1934 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
1935 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
1936 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
1937 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
1938 }
1939 else
1940 {
1941 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
1942 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
1943 {
1944 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
1945
1946 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx)));
1947 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
1948#ifdef VBOX_WITH_STATISTICS
1949 if (pVM->rem.s.Env.segs[R_SS].newselector)
1950 {
1951 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
1952 }
1953#endif
1954 }
1955 else
1956 pVM->rem.s.Env.segs[R_SS].newselector = 0;
1957
1958 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
1959 {
1960 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
1961 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
1962#ifdef VBOX_WITH_STATISTICS
1963 if (pVM->rem.s.Env.segs[R_ES].newselector)
1964 {
1965 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
1966 }
1967#endif
1968 }
1969 else
1970 pVM->rem.s.Env.segs[R_ES].newselector = 0;
1971
1972 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
1973 {
1974 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
1975 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
1976#ifdef VBOX_WITH_STATISTICS
1977 if (pVM->rem.s.Env.segs[R_CS].newselector)
1978 {
1979 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
1980 }
1981#endif
1982 }
1983 else
1984 pVM->rem.s.Env.segs[R_CS].newselector = 0;
1985
1986 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
1987 {
1988 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
1989 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
1990#ifdef VBOX_WITH_STATISTICS
1991 if (pVM->rem.s.Env.segs[R_DS].newselector)
1992 {
1993 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
1994 }
1995#endif
1996 }
1997 else
1998 pVM->rem.s.Env.segs[R_DS].newselector = 0;
1999
2000 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2001 * be the same but not the base/limit. */
2002 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2003 {
2004 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2005 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2006#ifdef VBOX_WITH_STATISTICS
2007 if (pVM->rem.s.Env.segs[R_FS].newselector)
2008 {
2009 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2010 }
2011#endif
2012 }
2013 else
2014 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2015
2016 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2017 {
2018 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2019 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2020#ifdef VBOX_WITH_STATISTICS
2021 if (pVM->rem.s.Env.segs[R_GS].newselector)
2022 {
2023 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2024 }
2025#endif
2026 }
2027 else
2028 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2029 }
2030
2031 /*
2032 * Check for traps.
2033 */
2034 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2035 rc = TRPMQueryTrap(pVM, &u8TrapNo, &enmType);
2036 if (RT_SUCCESS(rc))
2037 {
2038#ifdef DEBUG
2039 if (u8TrapNo == 0x80)
2040 {
2041 remR3DumpLnxSyscall(pVM);
2042 remR3DumpOBsdSyscall(pVM);
2043 }
2044#endif
2045
2046 pVM->rem.s.Env.exception_index = u8TrapNo;
2047 if (enmType != TRPM_SOFTWARE_INT)
2048 {
2049 pVM->rem.s.Env.exception_is_int = 0;
2050 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2051 }
2052 else
2053 {
2054 /*
2055 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2056 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2057 * for int03 and into.
2058 */
2059 pVM->rem.s.Env.exception_is_int = 1;
2060 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2061 /* int 3 may be generated by one-byte 0xcc */
2062 if (u8TrapNo == 3)
2063 {
2064 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2065 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2066 }
2067 /* int 4 may be generated by one-byte 0xce */
2068 else if (u8TrapNo == 4)
2069 {
2070 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2071 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2072 }
2073 }
2074
2075 /* get error code and cr2 if needed. */
2076 switch (u8TrapNo)
2077 {
2078 case 0x0e:
2079 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVM);
2080 /* fallthru */
2081 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2082 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVM);
2083 break;
2084
2085 case 0x11: case 0x08:
2086 default:
2087 pVM->rem.s.Env.error_code = 0;
2088 break;
2089 }
2090
2091 /*
2092 * We can now reset the active trap since the recompiler is gonna have a go at it.
2093 */
2094 rc = TRPMResetTrap(pVM);
2095 AssertRC(rc);
2096 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2097 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2098 }
2099
2100 /*
2101 * Clear old interrupt request flags; Check for pending hardware interrupts.
2102 * (See @remark for why we don't check for other FFs.)
2103 */
2104 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2105 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2106 || VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
2107 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2108
2109 /*
2110 * We're now in REM mode.
2111 */
2112 pVM->rem.s.fInREM = true;
2113 pVM->rem.s.fInStateSync = false;
2114 pVM->rem.s.cCanExecuteRaw = 0;
2115 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2116 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2117 return VINF_SUCCESS;
2118}
2119
2120
2121/**
2122 * Syncs back changes in the REM state to the the VM state.
2123 *
2124 * This must be called after invoking REMR3Run().
2125 * Calling it several times in a row is not permitted.
2126 *
2127 * @returns VBox status code.
2128 *
2129 * @param pVM VM Handle.
2130 */
2131REMR3DECL(int) REMR3StateBack(PVM pVM)
2132{
2133 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2134 unsigned i;
2135
2136 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2137 Log2(("REMR3StateBack:\n"));
2138 Assert(pVM->rem.s.fInREM);
2139
2140 /*
2141 * Copy back the registers.
2142 * This is done in the order they are declared in the CPUMCTX structure.
2143 */
2144
2145 /** @todo FOP */
2146 /** @todo FPUIP */
2147 /** @todo CS */
2148 /** @todo FPUDP */
2149 /** @todo DS */
2150
2151 /** @todo check if FPU/XMM was actually used in the recompiler */
2152 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2153//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2154
2155#ifdef TARGET_X86_64
2156 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
2157 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2158 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2159 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2160 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2161 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2162 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2163 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2164 pCtx->r8 = pVM->rem.s.Env.regs[8];
2165 pCtx->r9 = pVM->rem.s.Env.regs[9];
2166 pCtx->r10 = pVM->rem.s.Env.regs[10];
2167 pCtx->r11 = pVM->rem.s.Env.regs[11];
2168 pCtx->r12 = pVM->rem.s.Env.regs[12];
2169 pCtx->r13 = pVM->rem.s.Env.regs[13];
2170 pCtx->r14 = pVM->rem.s.Env.regs[14];
2171 pCtx->r15 = pVM->rem.s.Env.regs[15];
2172
2173 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2174
2175#else
2176 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2177 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2178 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2179 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2180 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2181 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2182 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2183
2184 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2185#endif
2186
2187 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2188
2189#ifdef VBOX_WITH_STATISTICS
2190 if (pVM->rem.s.Env.segs[R_SS].newselector)
2191 {
2192 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
2193 }
2194 if (pVM->rem.s.Env.segs[R_GS].newselector)
2195 {
2196 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
2197 }
2198 if (pVM->rem.s.Env.segs[R_FS].newselector)
2199 {
2200 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
2201 }
2202 if (pVM->rem.s.Env.segs[R_ES].newselector)
2203 {
2204 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
2205 }
2206 if (pVM->rem.s.Env.segs[R_DS].newselector)
2207 {
2208 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
2209 }
2210 if (pVM->rem.s.Env.segs[R_CS].newselector)
2211 {
2212 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
2213 }
2214#endif
2215 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2216 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2217 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2218 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2219 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2220
2221#ifdef TARGET_X86_64
2222 pCtx->rip = pVM->rem.s.Env.eip;
2223 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2224#else
2225 pCtx->eip = pVM->rem.s.Env.eip;
2226 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2227#endif
2228
2229 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2230 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2231 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2232 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2233 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2234 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2235
2236 for (i = 0; i < 8; i++)
2237 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2238
2239 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2240 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2241 {
2242 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2243 STAM_COUNTER_INC(&gStatREMGDTChange);
2244 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2245 }
2246
2247 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2248 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2249 {
2250 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2251 STAM_COUNTER_INC(&gStatREMIDTChange);
2252 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2253 }
2254
2255 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2256 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2257 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2258 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2259 {
2260 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2261 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2262 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2263 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
2264 STAM_COUNTER_INC(&gStatREMLDTRChange);
2265 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2266 }
2267
2268 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2269 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2270 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2271 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2272 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2273 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2274 : 0) )
2275 {
2276 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2277 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2278 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2279 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2280 pCtx->tr = pVM->rem.s.Env.tr.selector;
2281 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2282 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2283 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2284 if (pCtx->trHid.Attr.u)
2285 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2286 STAM_COUNTER_INC(&gStatREMTRChange);
2287 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2288 }
2289
2290 /** @todo These values could still be out of sync! */
2291 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2292 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2293 /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2294 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;
2295
2296 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2297 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2298 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;
2299
2300 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2301 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2302 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;
2303
2304 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2305 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2306 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;
2307
2308 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2309 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2310 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;
2311
2312 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2313 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2314 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;
2315
2316 /* Sysenter MSR */
2317 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2318 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2319 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2320
2321 /* System MSRs. */
2322 pCtx->msrEFER = pVM->rem.s.Env.efer;
2323 pCtx->msrSTAR = pVM->rem.s.Env.star;
2324 pCtx->msrPAT = pVM->rem.s.Env.pat;
2325#ifdef TARGET_X86_64
2326 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2327 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2328 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2329 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2330#endif
2331
2332 remR3TrapClear(pVM);
2333
2334 /*
2335 * Check for traps.
2336 */
2337 if ( pVM->rem.s.Env.exception_index >= 0
2338 && pVM->rem.s.Env.exception_index < 256)
2339 {
2340 int rc;
2341
2342 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2343 rc = TRPMAssertTrap(pVM, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
2344 AssertRC(rc);
2345 switch (pVM->rem.s.Env.exception_index)
2346 {
2347 case 0x0e:
2348 TRPMSetFaultAddress(pVM, pCtx->cr2);
2349 /* fallthru */
2350 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2351 case 0x11: case 0x08: /* 0 */
2352 TRPMSetErrorCode(pVM, pVM->rem.s.Env.error_code);
2353 break;
2354 }
2355
2356 }
2357
2358 /*
2359 * We're not longer in REM mode.
2360 */
2361 pVM->rem.s.fInREM = false;
2362 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2363 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2364 return VINF_SUCCESS;
2365}
2366
2367
2368/**
2369 * This is called by the disassembler when it wants to update the cpu state
2370 * before for instance doing a register dump.
2371 */
2372static void remR3StateUpdate(PVM pVM)
2373{
2374 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2375 unsigned i;
2376
2377 Assert(pVM->rem.s.fInREM);
2378
2379 /*
2380 * Copy back the registers.
2381 * This is done in the order they are declared in the CPUMCTX structure.
2382 */
2383
2384 /** @todo FOP */
2385 /** @todo FPUIP */
2386 /** @todo CS */
2387 /** @todo FPUDP */
2388 /** @todo DS */
2389 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2390 pCtx->fpu.MXCSR = 0;
2391 pCtx->fpu.MXCSR_MASK = 0;
2392
2393 /** @todo check if FPU/XMM was actually used in the recompiler */
2394 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2395//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2396
2397#ifdef TARGET_X86_64
2398 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2399 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2400 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2401 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2402 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2403 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2404 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2405 pCtx->r8 = pVM->rem.s.Env.regs[8];
2406 pCtx->r9 = pVM->rem.s.Env.regs[9];
2407 pCtx->r10 = pVM->rem.s.Env.regs[10];
2408 pCtx->r11 = pVM->rem.s.Env.regs[11];
2409 pCtx->r12 = pVM->rem.s.Env.regs[12];
2410 pCtx->r13 = pVM->rem.s.Env.regs[13];
2411 pCtx->r14 = pVM->rem.s.Env.regs[14];
2412 pCtx->r15 = pVM->rem.s.Env.regs[15];
2413
2414 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2415#else
2416 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2417 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2418 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2419 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2420 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2421 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2422 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2423
2424 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2425#endif
2426
2427 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2428
2429 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2430 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2431 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2432 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2433 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2434
2435#ifdef TARGET_X86_64
2436 pCtx->rip = pVM->rem.s.Env.eip;
2437 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2438#else
2439 pCtx->eip = pVM->rem.s.Env.eip;
2440 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2441#endif
2442
2443 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2444 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2445 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2446 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2447 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2448 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2449
2450 for (i = 0; i < 8; i++)
2451 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2452
2453 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2454 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2455 {
2456 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2457 STAM_COUNTER_INC(&gStatREMGDTChange);
2458 VM_FF_SET(pVM, VM_FF_SELM_SYNC_GDT);
2459 }
2460
2461 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2462 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2463 {
2464 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2465 STAM_COUNTER_INC(&gStatREMIDTChange);
2466 VM_FF_SET(pVM, VM_FF_TRPM_SYNC_IDT);
2467 }
2468
2469 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2470 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2471 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2472 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2473 {
2474 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2475 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2476 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2477 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2478 STAM_COUNTER_INC(&gStatREMLDTRChange);
2479 VM_FF_SET(pVM, VM_FF_SELM_SYNC_LDT);
2480 }
2481
2482 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2483 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2484 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2485 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2486 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2487 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2488 : 0) )
2489 {
2490 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2491 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2492 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2493 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2494 pCtx->tr = pVM->rem.s.Env.tr.selector;
2495 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2496 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2497 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2498 if (pCtx->trHid.Attr.u)
2499 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2500 STAM_COUNTER_INC(&gStatREMTRChange);
2501 VM_FF_SET(pVM, VM_FF_SELM_SYNC_TSS);
2502 }
2503
2504 /** @todo These values could still be out of sync! */
2505 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2506 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2507 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2508 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2509
2510 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2511 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2512 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2513
2514 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2515 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2516 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2517
2518 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2519 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2520 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2521
2522 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2523 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2524 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2525
2526 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2527 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2528 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2529
2530 /* Sysenter MSR */
2531 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2532 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2533 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2534
2535 /* System MSRs. */
2536 pCtx->msrEFER = pVM->rem.s.Env.efer;
2537 pCtx->msrSTAR = pVM->rem.s.Env.star;
2538 pCtx->msrPAT = pVM->rem.s.Env.pat;
2539#ifdef TARGET_X86_64
2540 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2541 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2542 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2543 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2544#endif
2545
2546}
2547
2548
2549/**
2550 * Update the VMM state information if we're currently in REM.
2551 *
2552 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2553 * we're currently executing in REM and the VMM state is invalid. This method will of
2554 * course check that we're executing in REM before syncing any data over to the VMM.
2555 *
2556 * @param pVM The VM handle.
2557 */
2558REMR3DECL(void) REMR3StateUpdate(PVM pVM)
2559{
2560 if (pVM->rem.s.fInREM)
2561 remR3StateUpdate(pVM);
2562}
2563
2564
2565#undef LOG_GROUP
2566#define LOG_GROUP LOG_GROUP_REM
2567
2568
2569/**
2570 * Notify the recompiler about Address Gate 20 state change.
2571 *
2572 * This notification is required since A20 gate changes are
2573 * initialized from a device driver and the VM might just as
2574 * well be in REM mode as in RAW mode.
2575 *
2576 * @param pVM VM handle.
2577 * @param fEnable True if the gate should be enabled.
2578 * False if the gate should be disabled.
2579 */
2580REMR3DECL(void) REMR3A20Set(PVM pVM, bool fEnable)
2581{
2582 bool fSaved;
2583
2584 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
2585 VM_ASSERT_EMT(pVM);
2586
2587 fSaved = pVM->rem.s.fIgnoreAll; /* just in case. */
2588 pVM->rem.s.fIgnoreAll = fSaved || !pVM->rem.s.fInREM;
2589
2590 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
2591
2592 pVM->rem.s.fIgnoreAll = fSaved;
2593}
2594
2595
2596/**
2597 * Replays the invalidated recorded pages.
2598 * Called in response to VERR_REM_FLUSHED_PAGES_OVERFLOW from the RAW execution loop.
2599 *
2600 * @param pVM VM handle.
2601 */
2602REMR3DECL(void) REMR3ReplayInvalidatedPages(PVM pVM)
2603{
2604 RTUINT i;
2605
2606 VM_ASSERT_EMT(pVM);
2607
2608 /*
2609 * Sync the required registers.
2610 */
2611 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2612 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2613 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2614 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2615
2616 /*
2617 * Replay the flushes.
2618 */
2619 pVM->rem.s.fIgnoreInvlPg = true;
2620 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2621 {
2622 Log2(("REMR3ReplayInvalidatedPages: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2623 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2624 }
2625 pVM->rem.s.fIgnoreInvlPg = false;
2626 pVM->rem.s.cInvalidatedPages = 0;
2627}
2628
2629
2630/**
2631 * Replays the handler notification changes
2632 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2633 *
2634 * @param pVM VM handle.
2635 */
2636REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2637{
2638 /*
2639 * Replay the flushes.
2640 */
2641 RTUINT i;
2642 const RTUINT c = pVM->rem.s.cHandlerNotifications;
2643
2644 LogFlow(("REMR3ReplayInvalidatedPages:\n"));
2645 VM_ASSERT_EMT(pVM);
2646
2647 pVM->rem.s.cHandlerNotifications = 0;
2648 for (i = 0; i < c; i++)
2649 {
2650 PREMHANDLERNOTIFICATION pRec = &pVM->rem.s.aHandlerNotifications[i];
2651 switch (pRec->enmKind)
2652 {
2653 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2654 REMR3NotifyHandlerPhysicalRegister(pVM,
2655 pRec->u.PhysicalRegister.enmType,
2656 pRec->u.PhysicalRegister.GCPhys,
2657 pRec->u.PhysicalRegister.cb,
2658 pRec->u.PhysicalRegister.fHasHCHandler);
2659 break;
2660
2661 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2662 REMR3NotifyHandlerPhysicalDeregister(pVM,
2663 pRec->u.PhysicalDeregister.enmType,
2664 pRec->u.PhysicalDeregister.GCPhys,
2665 pRec->u.PhysicalDeregister.cb,
2666 pRec->u.PhysicalDeregister.fHasHCHandler,
2667 pRec->u.PhysicalDeregister.fRestoreAsRAM);
2668 break;
2669
2670 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2671 REMR3NotifyHandlerPhysicalModify(pVM,
2672 pRec->u.PhysicalModify.enmType,
2673 pRec->u.PhysicalModify.GCPhysOld,
2674 pRec->u.PhysicalModify.GCPhysNew,
2675 pRec->u.PhysicalModify.cb,
2676 pRec->u.PhysicalModify.fHasHCHandler,
2677 pRec->u.PhysicalModify.fRestoreAsRAM);
2678 break;
2679
2680 default:
2681 AssertReleaseMsgFailed(("enmKind=%d\n", pRec->enmKind));
2682 break;
2683 }
2684 }
2685 VM_FF_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY);
2686}
2687
2688
2689/**
2690 * Notify REM about changed code page.
2691 *
2692 * @returns VBox status code.
2693 * @param pVM VM handle.
2694 * @param pvCodePage Code page address
2695 */
2696REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, RTGCPTR pvCodePage)
2697{
2698#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2699 int rc;
2700 RTGCPHYS PhysGC;
2701 uint64_t flags;
2702
2703 VM_ASSERT_EMT(pVM);
2704
2705 /*
2706 * Get the physical page address.
2707 */
2708 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2709 if (rc == VINF_SUCCESS)
2710 {
2711 /*
2712 * Sync the required registers and flush the whole page.
2713 * (Easier to do the whole page than notifying it about each physical
2714 * byte that was changed.
2715 */
2716 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2717 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2718 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2719 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2720
2721 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2722 }
2723#endif
2724 return VINF_SUCCESS;
2725}
2726
2727
2728/**
2729 * Notification about a successful MMR3PhysRegister() call.
2730 *
2731 * @param pVM VM handle.
2732 * @param GCPhys The physical address the RAM.
2733 * @param cb Size of the memory.
2734 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2735 */
2736REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2737{
2738 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2739 VM_ASSERT_EMT(pVM);
2740
2741 /*
2742 * Validate input - we trust the caller.
2743 */
2744 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2745 Assert(cb);
2746 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2747#ifdef VBOX_WITH_NEW_PHYS_CODE
2748 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2749#endif
2750
2751 /*
2752 * Base ram? Update GCPhysLastRam.
2753 */
2754#ifdef VBOX_WITH_NEW_PHYS_CODE
2755 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2756#else
2757 if (!GCPhys)
2758#endif
2759 {
2760 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2761 {
2762 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2763 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2764 }
2765 }
2766
2767 /*
2768 * Register the ram.
2769 */
2770 Assert(!pVM->rem.s.fIgnoreAll);
2771 pVM->rem.s.fIgnoreAll = true;
2772
2773#ifdef VBOX_WITH_NEW_PHYS_CODE
2774 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2775#else
2776 if (!GCPhys)
2777 cpu_register_physical_memory(GCPhys, cb, GCPhys | IO_MEM_RAM_MISSING);
2778 else
2779 {
2780 if (fFlags & MM_RAM_FLAGS_RESERVED)
2781 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2782 else
2783 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2784 }
2785#endif
2786 Assert(pVM->rem.s.fIgnoreAll);
2787 pVM->rem.s.fIgnoreAll = false;
2788}
2789
2790#ifndef VBOX_WITH_NEW_PHYS_CODE
2791
2792/**
2793 * Notification about a successful PGMR3PhysRegisterChunk() call.
2794 *
2795 * @param pVM VM handle.
2796 * @param GCPhys The physical address the RAM.
2797 * @param cb Size of the memory.
2798 * @param pvRam The HC address of the RAM.
2799 * @param fFlags Flags of the MM_RAM_FLAGS_* defines.
2800 */
2801REMR3DECL(void) REMR3NotifyPhysRamChunkRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, RTHCUINTPTR pvRam, unsigned fFlags)
2802{
2803 Log(("REMR3NotifyPhysRamChunkRegister: GCPhys=%RGp cb=%d pvRam=%p fFlags=%d\n", GCPhys, cb, pvRam, fFlags));
2804 VM_ASSERT_EMT(pVM);
2805
2806 /*
2807 * Validate input - we trust the caller.
2808 */
2809 Assert(pvRam);
2810 Assert(RT_ALIGN(pvRam, PAGE_SIZE) == pvRam);
2811 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2812 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
2813 Assert(fFlags == 0 /* normal RAM */);
2814 Assert(!pVM->rem.s.fIgnoreAll);
2815 pVM->rem.s.fIgnoreAll = true;
2816 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2817 Assert(pVM->rem.s.fIgnoreAll);
2818 pVM->rem.s.fIgnoreAll = false;
2819}
2820
2821
2822/**
2823 * Grows dynamically allocated guest RAM.
2824 * Will raise a fatal error if the operation fails.
2825 *
2826 * @param physaddr The physical address.
2827 */
2828void remR3GrowDynRange(unsigned long physaddr) /** @todo Needs fixing for MSC... */
2829{
2830 int rc;
2831 PVM pVM = cpu_single_env->pVM;
2832 const RTGCPHYS GCPhys = physaddr;
2833
2834 LogFlow(("remR3GrowDynRange %RGp\n", (RTGCPTR)physaddr));
2835 rc = PGM3PhysGrowRange(pVM, &GCPhys);
2836 if (RT_SUCCESS(rc))
2837 return;
2838
2839 LogRel(("\nUnable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr));
2840 cpu_abort(cpu_single_env, "Unable to allocate guest RAM chunk at %RGp\n", (RTGCPTR)physaddr);
2841 AssertFatalFailed();
2842}
2843
2844#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2845
2846/**
2847 * Notification about a successful MMR3PhysRomRegister() call.
2848 *
2849 * @param pVM VM handle.
2850 * @param GCPhys The physical address of the ROM.
2851 * @param cb The size of the ROM.
2852 * @param pvCopy Pointer to the ROM copy.
2853 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2854 * This function will be called when ever the protection of the
2855 * shadow ROM changes (at reset and end of POST).
2856 */
2857REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2858{
2859 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2860 VM_ASSERT_EMT(pVM);
2861
2862 /*
2863 * Validate input - we trust the caller.
2864 */
2865 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2866 Assert(cb);
2867 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2868
2869 /*
2870 * Register the rom.
2871 */
2872 Assert(!pVM->rem.s.fIgnoreAll);
2873 pVM->rem.s.fIgnoreAll = true;
2874
2875 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2876
2877 Assert(pVM->rem.s.fIgnoreAll);
2878 pVM->rem.s.fIgnoreAll = false;
2879}
2880
2881
2882/**
2883 * Notification about a successful memory deregistration or reservation.
2884 *
2885 * @param pVM VM Handle.
2886 * @param GCPhys Start physical address.
2887 * @param cb The size of the range.
2888 */
2889REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2890{
2891 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2892 VM_ASSERT_EMT(pVM);
2893
2894 /*
2895 * Validate input - we trust the caller.
2896 */
2897 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2898 Assert(cb);
2899 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2900
2901 /*
2902 * Unassigning the memory.
2903 */
2904 Assert(!pVM->rem.s.fIgnoreAll);
2905 pVM->rem.s.fIgnoreAll = true;
2906
2907 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2908
2909 Assert(pVM->rem.s.fIgnoreAll);
2910 pVM->rem.s.fIgnoreAll = false;
2911}
2912
2913
2914/**
2915 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2916 *
2917 * @param pVM VM Handle.
2918 * @param enmType Handler type.
2919 * @param GCPhys Handler range address.
2920 * @param cb Size of the handler range.
2921 * @param fHasHCHandler Set if the handler has a HC callback function.
2922 *
2923 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2924 * Handler memory type to memory which has no HC handler.
2925 */
2926REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2927{
2928 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
2929 enmType, GCPhys, cb, fHasHCHandler));
2930 VM_ASSERT_EMT(pVM);
2931 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2932 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2933
2934 if (pVM->rem.s.cHandlerNotifications)
2935 REMR3ReplayHandlerNotifications(pVM);
2936
2937 Assert(!pVM->rem.s.fIgnoreAll);
2938 pVM->rem.s.fIgnoreAll = true;
2939
2940 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2941 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
2942 else if (fHasHCHandler)
2943 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
2944
2945 Assert(pVM->rem.s.fIgnoreAll);
2946 pVM->rem.s.fIgnoreAll = false;
2947}
2948
2949
2950/**
2951 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
2952 *
2953 * @param pVM VM Handle.
2954 * @param enmType Handler type.
2955 * @param GCPhys Handler range address.
2956 * @param cb Size of the handler range.
2957 * @param fHasHCHandler Set if the handler has a HC callback function.
2958 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
2959 */
2960REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
2961{
2962 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
2963 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
2964 VM_ASSERT_EMT(pVM);
2965
2966 if (pVM->rem.s.cHandlerNotifications)
2967 REMR3ReplayHandlerNotifications(pVM);
2968
2969 Assert(!pVM->rem.s.fIgnoreAll);
2970 pVM->rem.s.fIgnoreAll = true;
2971
2972/** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
2973 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
2974 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2975 else if (fHasHCHandler)
2976 {
2977 if (!fRestoreAsRAM)
2978 {
2979 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
2980 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2981 }
2982 else
2983 {
2984 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2985 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
2986 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2987 }
2988 }
2989
2990 Assert(pVM->rem.s.fIgnoreAll);
2991 pVM->rem.s.fIgnoreAll = false;
2992}
2993
2994
2995/**
2996 * Notification about a successful PGMR3HandlerPhysicalModify() call.
2997 *
2998 * @param pVM VM Handle.
2999 * @param enmType Handler type.
3000 * @param GCPhysOld Old handler range address.
3001 * @param GCPhysNew New handler range address.
3002 * @param cb Size of the handler range.
3003 * @param fHasHCHandler Set if the handler has a HC callback function.
3004 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3005 */
3006REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3007{
3008 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3009 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3010 VM_ASSERT_EMT(pVM);
3011 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3012
3013 if (pVM->rem.s.cHandlerNotifications)
3014 REMR3ReplayHandlerNotifications(pVM);
3015
3016 if (fHasHCHandler)
3017 {
3018 Assert(!pVM->rem.s.fIgnoreAll);
3019 pVM->rem.s.fIgnoreAll = true;
3020
3021 /*
3022 * Reset the old page.
3023 */
3024 if (!fRestoreAsRAM)
3025 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3026 else
3027 {
3028 /* This is not perfect, but it'll do for PD monitoring... */
3029 Assert(cb == PAGE_SIZE);
3030 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3031 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3032 }
3033
3034 /*
3035 * Update the new page.
3036 */
3037 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3038 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3039 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3040
3041 Assert(pVM->rem.s.fIgnoreAll);
3042 pVM->rem.s.fIgnoreAll = false;
3043 }
3044}
3045
3046
3047/**
3048 * Checks if we're handling access to this page or not.
3049 *
3050 * @returns true if we're trapping access.
3051 * @returns false if we aren't.
3052 * @param pVM The VM handle.
3053 * @param GCPhys The physical address.
3054 *
3055 * @remark This function will only work correctly in VBOX_STRICT builds!
3056 */
3057REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3058{
3059#ifdef VBOX_STRICT
3060 unsigned long off;
3061 if (pVM->rem.s.cHandlerNotifications)
3062 REMR3ReplayHandlerNotifications(pVM);
3063
3064 off = get_phys_page_offset(GCPhys);
3065 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3066 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3067 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3068#else
3069 return false;
3070#endif
3071}
3072
3073
3074/**
3075 * Deals with a rare case in get_phys_addr_code where the code
3076 * is being monitored.
3077 *
3078 * It could also be an MMIO page, in which case we will raise a fatal error.
3079 *
3080 * @returns The physical address corresponding to addr.
3081 * @param env The cpu environment.
3082 * @param addr The virtual address.
3083 * @param pTLBEntry The TLB entry.
3084 */
3085target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3086 target_ulong addr,
3087 CPUTLBEntry* pTLBEntry,
3088 target_phys_addr_t ioTLBEntry)
3089{
3090 PVM pVM = env->pVM;
3091
3092 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3093 {
3094 /* If code memory is being monitored, appropriate IOTLB entry will have
3095 handler IO type, and addend will provide real physical address, no
3096 matter if we store VA in TLB or not, as handlers are always passed PA */
3097 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3098 return ret;
3099 }
3100 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3101 "*** handlers\n",
3102 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3103 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3104 LogRel(("*** mmio\n"));
3105 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3106 LogRel(("*** phys\n"));
3107 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3108 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3109 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3110 AssertFatalFailed();
3111}
3112
3113/**
3114 * Read guest RAM and ROM.
3115 *
3116 * @param SrcGCPhys The source address (guest physical).
3117 * @param pvDst The destination address.
3118 * @param cb Number of bytes
3119 */
3120void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3121{
3122 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3123 VBOX_CHECK_ADDR(SrcGCPhys);
3124 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3125#ifdef VBOX_DEBUG_PHYS
3126 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3127#endif
3128 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3129}
3130
3131
3132/**
3133 * Read guest RAM and ROM, unsigned 8-bit.
3134 *
3135 * @param SrcGCPhys The source address (guest physical).
3136 */
3137RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3138{
3139 uint8_t val;
3140 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3141 VBOX_CHECK_ADDR(SrcGCPhys);
3142 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3143 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3144#ifdef VBOX_DEBUG_PHYS
3145 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3146#endif
3147 return val;
3148}
3149
3150
3151/**
3152 * Read guest RAM and ROM, signed 8-bit.
3153 *
3154 * @param SrcGCPhys The source address (guest physical).
3155 */
3156RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3157{
3158 int8_t val;
3159 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3160 VBOX_CHECK_ADDR(SrcGCPhys);
3161 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3162 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3163#ifdef VBOX_DEBUG_PHYS
3164 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3165#endif
3166 return val;
3167}
3168
3169
3170/**
3171 * Read guest RAM and ROM, unsigned 16-bit.
3172 *
3173 * @param SrcGCPhys The source address (guest physical).
3174 */
3175RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3176{
3177 uint16_t val;
3178 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3179 VBOX_CHECK_ADDR(SrcGCPhys);
3180 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3181 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3182#ifdef VBOX_DEBUG_PHYS
3183 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3184#endif
3185 return val;
3186}
3187
3188
3189/**
3190 * Read guest RAM and ROM, signed 16-bit.
3191 *
3192 * @param SrcGCPhys The source address (guest physical).
3193 */
3194RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3195{
3196 int16_t val;
3197 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3198 VBOX_CHECK_ADDR(SrcGCPhys);
3199 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3200 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3201#ifdef VBOX_DEBUG_PHYS
3202 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3203#endif
3204 return val;
3205}
3206
3207
3208/**
3209 * Read guest RAM and ROM, unsigned 32-bit.
3210 *
3211 * @param SrcGCPhys The source address (guest physical).
3212 */
3213RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3214{
3215 uint32_t val;
3216 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3217 VBOX_CHECK_ADDR(SrcGCPhys);
3218 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3219 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3220#ifdef VBOX_DEBUG_PHYS
3221 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3222#endif
3223 return val;
3224}
3225
3226
3227/**
3228 * Read guest RAM and ROM, signed 32-bit.
3229 *
3230 * @param SrcGCPhys The source address (guest physical).
3231 */
3232RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3233{
3234 int32_t val;
3235 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3236 VBOX_CHECK_ADDR(SrcGCPhys);
3237 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3238 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3239#ifdef VBOX_DEBUG_PHYS
3240 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3241#endif
3242 return val;
3243}
3244
3245
3246/**
3247 * Read guest RAM and ROM, unsigned 64-bit.
3248 *
3249 * @param SrcGCPhys The source address (guest physical).
3250 */
3251uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3252{
3253 uint64_t val;
3254 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3255 VBOX_CHECK_ADDR(SrcGCPhys);
3256 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3257 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3258#ifdef VBOX_DEBUG_PHYS
3259 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3260#endif
3261 return val;
3262}
3263
3264
3265/**
3266 * Read guest RAM and ROM, signed 64-bit.
3267 *
3268 * @param SrcGCPhys The source address (guest physical).
3269 */
3270int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3271{
3272 int64_t val;
3273 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3274 VBOX_CHECK_ADDR(SrcGCPhys);
3275 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3276 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3277#ifdef VBOX_DEBUG_PHYS
3278 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3279#endif
3280 return val;
3281}
3282
3283
3284/**
3285 * Write guest RAM.
3286 *
3287 * @param DstGCPhys The destination address (guest physical).
3288 * @param pvSrc The source address.
3289 * @param cb Number of bytes to write
3290 */
3291void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3292{
3293 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3294 VBOX_CHECK_ADDR(DstGCPhys);
3295 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3296 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3297#ifdef VBOX_DEBUG_PHYS
3298 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3299#endif
3300}
3301
3302
3303/**
3304 * Write guest RAM, unsigned 8-bit.
3305 *
3306 * @param DstGCPhys The destination address (guest physical).
3307 * @param val Value
3308 */
3309void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3310{
3311 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3312 VBOX_CHECK_ADDR(DstGCPhys);
3313 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3314 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3315#ifdef VBOX_DEBUG_PHYS
3316 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3317#endif
3318}
3319
3320
3321/**
3322 * Write guest RAM, unsigned 8-bit.
3323 *
3324 * @param DstGCPhys The destination address (guest physical).
3325 * @param val Value
3326 */
3327void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3328{
3329 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3330 VBOX_CHECK_ADDR(DstGCPhys);
3331 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3332 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3333#ifdef VBOX_DEBUG_PHYS
3334 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3335#endif
3336}
3337
3338
3339/**
3340 * Write guest RAM, unsigned 32-bit.
3341 *
3342 * @param DstGCPhys The destination address (guest physical).
3343 * @param val Value
3344 */
3345void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3346{
3347 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3348 VBOX_CHECK_ADDR(DstGCPhys);
3349 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3350 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3351#ifdef VBOX_DEBUG_PHYS
3352 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3353#endif
3354}
3355
3356
3357/**
3358 * Write guest RAM, unsigned 64-bit.
3359 *
3360 * @param DstGCPhys The destination address (guest physical).
3361 * @param val Value
3362 */
3363void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3364{
3365 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3366 VBOX_CHECK_ADDR(DstGCPhys);
3367 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3368 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3369#ifdef VBOX_DEBUG_PHYS
3370 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3371#endif
3372}
3373
3374#undef LOG_GROUP
3375#define LOG_GROUP LOG_GROUP_REM_MMIO
3376
/** Read MMIO memory, 8-bit; dispatches to the IOM MMIO handler. */
static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
    return u32;
}

/** Read MMIO memory, 16-bit; dispatches to the IOM MMIO handler. */
static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
    return u32;
}

/** Read MMIO memory, 32-bit; dispatches to the IOM MMIO handler. */
static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32 = 0;
    int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
    return u32;
}

/** Write to MMIO memory, 8-bit; dispatches to the IOM MMIO handler. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}

/** Write to MMIO memory, 16-bit; dispatches to the IOM MMIO handler. */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}

/** Write to MMIO memory, 32-bit; dispatches to the IOM MMIO handler. */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3433
3434
3435#undef LOG_GROUP
3436#define LOG_GROUP LOG_GROUP_REM_HANDLER
3437
/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */

/** Handler-memory read, 8-bit: routed through PGM so registered access
 *  handlers fire. */
static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
{
    uint8_t u8;
    Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
    return u8;
}

/** Handler-memory read, 16-bit (see warning above). */
static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
{
    uint16_t u16;
    Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
    return u16;
}

/** Handler-memory read, 32-bit (see warning above). */
static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
{
    uint32_t u32;
    Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
    PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
    return u32;
}

/** Handler-memory write, 8-bit: writes the low byte of u32 via PGM. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}

/** Handler-memory write, 16-bit: writes the low word of u32 via PGM. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}

/** Handler-memory write, 32-bit (see warning above). */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3481
3482/* -+- disassembly -+- */
3483
3484#undef LOG_GROUP
3485#define LOG_GROUP LOG_GROUP_REM_DISAS
3486
3487
3488/**
3489 * Enables or disables singled stepped disassembly.
3490 *
3491 * @returns VBox status code.
3492 * @param pVM VM handle.
3493 * @param fEnable To enable set this flag, to disable clear it.
3494 */
3495static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3496{
3497 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3498 VM_ASSERT_EMT(pVM);
3499
3500 if (fEnable)
3501 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3502 else
3503 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3504 return VINF_SUCCESS;
3505}
3506
3507
3508/**
3509 * Enables or disables singled stepped disassembly.
3510 *
3511 * @returns VBox status code.
3512 * @param pVM VM handle.
3513 * @param fEnable To enable set this flag, to disable clear it.
3514 */
3515REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3516{
3517 PVMREQ pReq;
3518 int rc;
3519
3520 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3521 if (VM_IS_EMT(pVM))
3522 return remR3DisasEnableStepping(pVM, fEnable);
3523
3524 rc = VMR3ReqCall(pVM, VMREQDEST_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3525 AssertRC(rc);
3526 if (RT_SUCCESS(rc))
3527 rc = pReq->iStatus;
3528 VMR3ReqFree(pReq);
3529 return rc;
3530}
3531
3532
3533#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
3534/**
3535 * External Debugger Command: .remstep [on|off|1|0]
3536 */
3537static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3538{
3539 bool fEnable;
3540 int rc;
3541
3542 /* print status */
3543 if (cArgs == 0)
3544 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
3545 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
3546
3547 /* convert the argument and change the mode. */
3548 rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
3549 if (RT_FAILURE(rc))
3550 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
3551 rc = REMR3DisasEnableStepping(pVM, fEnable);
3552 if (RT_FAILURE(rc))
3553 return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
3554 return rc;
3555}
3556#endif
3557
3558
3559/**
3560 * Disassembles one instruction and prints it to the log.
3561 *
3562 * @returns Success indicator.
3563 * @param env Pointer to the recompiler CPU structure.
3564 * @param f32BitCode Indicates that whether or not the code should
3565 * be disassembled as 16 or 32 bit. If -1 the CS
3566 * selector will be inspected.
3567 * @param pszPrefix
3568 */
3569bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3570{
3571 PVM pVM = env->pVM;
3572 const bool fLog = LogIsEnabled();
3573 const bool fLog2 = LogIs2Enabled();
3574 int rc = VINF_SUCCESS;
3575
3576 /*
3577 * Don't bother if there ain't any log output to do.
3578 */
3579 if (!fLog && !fLog2)
3580 return true;
3581
3582 /*
3583 * Update the state so DBGF reads the correct register values.
3584 */
3585 remR3StateUpdate(pVM);
3586
3587 /*
3588 * Log registers if requested.
3589 */
3590 if (!fLog2)
3591 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3592
3593 /*
3594 * Disassemble to log.
3595 */
3596 if (fLog)
3597 rc = DBGFR3DisasInstrCurrentLogInternal(pVM, pszPrefix);
3598
3599 return RT_SUCCESS(rc);
3600}
3601
3602
3603/**
3604 * Disassemble recompiled code.
3605 *
3606 * @param phFileIgnored Ignored, logfile usually.
3607 * @param pvCode Pointer to the code block.
3608 * @param cb Size of the code block.
3609 */
3610void disas(FILE *phFile, void *pvCode, unsigned long cb)
3611{
3612#ifdef DEBUG_TMP_LOGGING
3613# define DISAS_PRINTF(x...) fprintf(phFile, x)
3614#else
3615# define DISAS_PRINTF(x...) RTLogPrintf(x)
3616 if (LogIs2Enabled())
3617#endif
3618 {
3619 unsigned off = 0;
3620 char szOutput[256];
3621 DISCPUSTATE Cpu;
3622
3623 memset(&Cpu, 0, sizeof(Cpu));
3624#ifdef RT_ARCH_X86
3625 Cpu.mode = CPUMODE_32BIT;
3626#else
3627 Cpu.mode = CPUMODE_64BIT;
3628#endif
3629
3630 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3631 while (off < cb)
3632 {
3633 uint32_t cbInstr;
3634 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3635 DISAS_PRINTF("%s", szOutput);
3636 else
3637 {
3638 DISAS_PRINTF("disas error\n");
3639 cbInstr = 1;
3640#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3641 break;
3642#endif
3643 }
3644 off += cbInstr;
3645 }
3646 }
3647
3648#undef DISAS_PRINTF
3649}
3650
3651
3652/**
3653 * Disassemble guest code.
3654 *
3655 * @param phFileIgnored Ignored, logfile usually.
3656 * @param uCode The guest address of the code to disassemble. (flat?)
3657 * @param cb Number of bytes to disassemble.
3658 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3659 */
3660void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3661{
3662#ifdef DEBUG_TMP_LOGGING
3663# define DISAS_PRINTF(x...) fprintf(phFile, x)
3664#else
3665# define DISAS_PRINTF(x...) RTLogPrintf(x)
3666 if (LogIs2Enabled())
3667#endif
3668 {
3669 PVM pVM = cpu_single_env->pVM;
3670 RTSEL cs;
3671 RTGCUINTPTR eip;
3672
3673 /*
3674 * Update the state so DBGF reads the correct register values (flags).
3675 */
3676 remR3StateUpdate(pVM);
3677
3678 /*
3679 * Do the disassembling.
3680 */
3681 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3682 cs = cpu_single_env->segs[R_CS].selector;
3683 eip = uCode - cpu_single_env->segs[R_CS].base;
3684 for (;;)
3685 {
3686 char szBuf[256];
3687 uint32_t cbInstr;
3688 int rc = DBGFR3DisasInstrEx(pVM,
3689 cs,
3690 eip,
3691 0,
3692 szBuf, sizeof(szBuf),
3693 &cbInstr);
3694 if (RT_SUCCESS(rc))
3695 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3696 else
3697 {
3698 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3699 cbInstr = 1;
3700 }
3701
3702 /* next */
3703 if (cb <= cbInstr)
3704 break;
3705 cb -= cbInstr;
3706 uCode += cbInstr;
3707 eip += cbInstr;
3708 }
3709 }
3710#undef DISAS_PRINTF
3711}
3712
3713
3714/**
3715 * Looks up a guest symbol.
3716 *
3717 * @returns Pointer to symbol name. This is a static buffer.
3718 * @param orig_addr The address in question.
3719 */
3720const char *lookup_symbol(target_ulong orig_addr)
3721{
3722 RTGCINTPTR off = 0;
3723 DBGFSYMBOL Sym;
3724 PVM pVM = cpu_single_env->pVM;
3725 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3726 if (RT_SUCCESS(rc))
3727 {
3728 static char szSym[sizeof(Sym.szName) + 48];
3729 if (!off)
3730 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3731 else if (off > 0)
3732 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3733 else
3734 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3735 return szSym;
3736 }
3737 return "<N/A>";
3738}
3739
3740
3741#undef LOG_GROUP
3742#define LOG_GROUP LOG_GROUP_REM
3743
3744
3745/* -+- FF notifications -+- */
3746
3747
3748/**
3749 * Notification about a pending interrupt.
3750 *
3751 * @param pVM VM Handle.
3752 * @param u8Interrupt Interrupt
3753 * @thread The emulation thread.
3754 */
3755REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, uint8_t u8Interrupt)
3756{
3757 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
3758 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
3759}
3760
3761/**
3762 * Notification about a pending interrupt.
3763 *
3764 * @returns Pending interrupt or REM_NO_PENDING_IRQ
3765 * @param pVM VM Handle.
3766 * @thread The emulation thread.
3767 */
3768REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM)
3769{
3770 return pVM->rem.s.u32PendingInterrupt;
3771}
3772
3773/**
3774 * Notification about the interrupt FF being set.
3775 *
3776 * @param pVM VM Handle.
3777 * @thread The emulation thread.
3778 */
3779REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM)
3780{
3781 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3782 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3783 if (pVM->rem.s.fInREM)
3784 {
3785 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3786 CPU_INTERRUPT_EXTERNAL_HARD);
3787 }
3788}
3789
3790
3791/**
3792 * Notification about the interrupt FF being set.
3793 *
3794 * @param pVM VM Handle.
3795 * @thread Any.
3796 */
3797REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM)
3798{
3799 LogFlow(("REMR3NotifyInterruptClear:\n"));
3800 if (pVM->rem.s.fInREM)
3801 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3802}
3803
3804
3805/**
3806 * Notification about pending timer(s).
3807 *
3808 * @param pVM VM Handle.
3809 * @thread Any.
3810 */
3811REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM)
3812{
3813#ifndef DEBUG_bird
3814 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3815#endif
3816 if (pVM->rem.s.fInREM)
3817 {
3818 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3819 CPU_INTERRUPT_EXTERNAL_TIMER);
3820 }
3821}
3822
3823
3824/**
3825 * Notification about pending DMA transfers.
3826 *
3827 * @param pVM VM Handle.
3828 * @thread Any.
3829 */
3830REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3831{
3832 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3833 if (pVM->rem.s.fInREM)
3834 {
3835 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3836 CPU_INTERRUPT_EXTERNAL_DMA);
3837 }
3838}
3839
3840
3841/**
3842 * Notification about pending timer(s).
3843 *
3844 * @param pVM VM Handle.
3845 * @thread Any.
3846 */
3847REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3848{
3849 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3850 if (pVM->rem.s.fInREM)
3851 {
3852 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3853 CPU_INTERRUPT_EXTERNAL_EXIT);
3854 }
3855}
3856
3857
3858/**
3859 * Notification about pending FF set by an external thread.
3860 *
3861 * @param pVM VM handle.
3862 * @thread Any.
3863 */
3864REMR3DECL(void) REMR3NotifyFF(PVM pVM)
3865{
3866 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
3867 if (pVM->rem.s.fInREM)
3868 {
3869 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3870 CPU_INTERRUPT_EXTERNAL_EXIT);
3871 }
3872}
3873
3874
3875#ifdef VBOX_WITH_STATISTICS
3876void remR3ProfileStart(int statcode)
3877{
3878 STAMPROFILEADV *pStat;
3879 switch(statcode)
3880 {
3881 case STATS_EMULATE_SINGLE_INSTR:
3882 pStat = &gStatExecuteSingleInstr;
3883 break;
3884 case STATS_QEMU_COMPILATION:
3885 pStat = &gStatCompilationQEmu;
3886 break;
3887 case STATS_QEMU_RUN_EMULATED_CODE:
3888 pStat = &gStatRunCodeQEmu;
3889 break;
3890 case STATS_QEMU_TOTAL:
3891 pStat = &gStatTotalTimeQEmu;
3892 break;
3893 case STATS_QEMU_RUN_TIMERS:
3894 pStat = &gStatTimers;
3895 break;
3896 case STATS_TLB_LOOKUP:
3897 pStat= &gStatTBLookup;
3898 break;
3899 case STATS_IRQ_HANDLING:
3900 pStat= &gStatIRQ;
3901 break;
3902 case STATS_RAW_CHECK:
3903 pStat = &gStatRawCheck;
3904 break;
3905
3906 default:
3907 AssertMsgFailed(("unknown stat %d\n", statcode));
3908 return;
3909 }
3910 STAM_PROFILE_ADV_START(pStat, a);
3911}
3912
3913
3914void remR3ProfileStop(int statcode)
3915{
3916 STAMPROFILEADV *pStat;
3917 switch(statcode)
3918 {
3919 case STATS_EMULATE_SINGLE_INSTR:
3920 pStat = &gStatExecuteSingleInstr;
3921 break;
3922 case STATS_QEMU_COMPILATION:
3923 pStat = &gStatCompilationQEmu;
3924 break;
3925 case STATS_QEMU_RUN_EMULATED_CODE:
3926 pStat = &gStatRunCodeQEmu;
3927 break;
3928 case STATS_QEMU_TOTAL:
3929 pStat = &gStatTotalTimeQEmu;
3930 break;
3931 case STATS_QEMU_RUN_TIMERS:
3932 pStat = &gStatTimers;
3933 break;
3934 case STATS_TLB_LOOKUP:
3935 pStat= &gStatTBLookup;
3936 break;
3937 case STATS_IRQ_HANDLING:
3938 pStat= &gStatIRQ;
3939 break;
3940 case STATS_RAW_CHECK:
3941 pStat = &gStatRawCheck;
3942 break;
3943 default:
3944 AssertMsgFailed(("unknown stat %d\n", statcode));
3945 return;
3946 }
3947 STAM_PROFILE_ADV_STOP(pStat, a);
3948}
3949#endif
3950
3951/**
3952 * Raise an RC, force rem exit.
3953 *
3954 * @param pVM VM handle.
3955 * @param rc The rc.
3956 */
3957void remR3RaiseRC(PVM pVM, int rc)
3958{
3959 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
3960 Assert(pVM->rem.s.fInREM);
3961 VM_ASSERT_EMT(pVM);
3962 pVM->rem.s.rc = rc;
3963 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
3964}
3965
3966
3967/* -+- timers -+- */
3968
3969uint64_t cpu_get_tsc(CPUX86State *env)
3970{
3971 STAM_COUNTER_INC(&gStatCpuGetTSC);
3972 return TMCpuTickGet(env->pVM);
3973}
3974
3975
3976/* -+- interrupts -+- */
3977
3978void cpu_set_ferr(CPUX86State *env)
3979{
3980 int rc = PDMIsaSetIrq(env->pVM, 13, 1);
3981 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
3982}
3983
3984int cpu_get_pic_interrupt(CPUState *env)
3985{
3986 uint8_t u8Interrupt;
3987 int rc;
3988
3989 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
3990 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
3991 * with the (a)pic.
3992 */
3993 /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
3994 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
3995 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
3996 * remove this kludge. */
3997 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
3998 {
3999 rc = VINF_SUCCESS;
4000 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4001 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4002 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4003 }
4004 else
4005 rc = PDMGetInterrupt(env->pVM, &u8Interrupt);
4006
4007 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
4008 if (RT_SUCCESS(rc))
4009 {
4010 if (VM_FF_ISPENDING(env->pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
4011 env->interrupt_request |= CPU_INTERRUPT_HARD;
4012 return u8Interrupt;
4013 }
4014 return -1;
4015}
4016
4017
4018/* -+- local apic -+- */
4019
4020void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4021{
4022 int rc = PDMApicSetBase(env->pVM, val);
4023 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4024}
4025
4026uint64_t cpu_get_apic_base(CPUX86State *env)
4027{
4028 uint64_t u64;
4029 int rc = PDMApicGetBase(env->pVM, &u64);
4030 if (RT_SUCCESS(rc))
4031 {
4032 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4033 return u64;
4034 }
4035 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4036 return 0;
4037}
4038
4039void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4040{
4041 int rc = PDMApicSetTPR(env->pVM, val);
4042 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4043}
4044
4045uint8_t cpu_get_apic_tpr(CPUX86State *env)
4046{
4047 uint8_t u8;
4048 int rc = PDMApicGetTPR(env->pVM, &u8, NULL);
4049 if (RT_SUCCESS(rc))
4050 {
4051 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4052 return u8;
4053 }
4054 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4055 return 0;
4056}
4057
4058
4059uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4060{
4061 uint64_t value;
4062 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4063 if (RT_SUCCESS(rc))
4064 {
4065 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4066 return value;
4067 }
4068 /** @todo: exception ? */
4069 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4070 return value;
4071}
4072
4073void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
4074{
4075 int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
4076 /** @todo: exception if error ? */
4077 LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
4078}
4079
4080uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
4081{
4082 return CPUMGetGuestMsr(env->pVM, msr);
4083}
4084
4085void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
4086{
4087 CPUMSetGuestMsr(env->pVM, msr, val);
4088}
4089
4090/* -+- I/O Ports -+- */
4091
4092#undef LOG_GROUP
4093#define LOG_GROUP LOG_GROUP_REM_IOPORT
4094
4095void cpu_outb(CPUState *env, int addr, int val)
4096{
4097 int rc;
4098
4099 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4100 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4101
4102 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4103 if (RT_LIKELY(rc == VINF_SUCCESS))
4104 return;
4105 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4106 {
4107 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4108 remR3RaiseRC(env->pVM, rc);
4109 return;
4110 }
4111 remAbort(rc, __FUNCTION__);
4112}
4113
4114void cpu_outw(CPUState *env, int addr, int val)
4115{
4116 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4117 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4118 if (RT_LIKELY(rc == VINF_SUCCESS))
4119 return;
4120 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4121 {
4122 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4123 remR3RaiseRC(env->pVM, rc);
4124 return;
4125 }
4126 remAbort(rc, __FUNCTION__);
4127}
4128
4129void cpu_outl(CPUState *env, int addr, int val)
4130{
4131 int rc;
4132 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4133 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4134 if (RT_LIKELY(rc == VINF_SUCCESS))
4135 return;
4136 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4137 {
4138 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4139 remR3RaiseRC(env->pVM, rc);
4140 return;
4141 }
4142 remAbort(rc, __FUNCTION__);
4143}
4144
4145int cpu_inb(CPUState *env, int addr)
4146{
4147 uint32_t u32 = 0;
4148 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4149 if (RT_LIKELY(rc == VINF_SUCCESS))
4150 {
4151 if (/*addr != 0x61 && */addr != 0x71)
4152 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4153 return (int)u32;
4154 }
4155 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4156 {
4157 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4158 remR3RaiseRC(env->pVM, rc);
4159 return (int)u32;
4160 }
4161 remAbort(rc, __FUNCTION__);
4162 return 0xff;
4163}
4164
4165int cpu_inw(CPUState *env, int addr)
4166{
4167 uint32_t u32 = 0;
4168 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4169 if (RT_LIKELY(rc == VINF_SUCCESS))
4170 {
4171 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4172 return (int)u32;
4173 }
4174 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4175 {
4176 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4177 remR3RaiseRC(env->pVM, rc);
4178 return (int)u32;
4179 }
4180 remAbort(rc, __FUNCTION__);
4181 return 0xffff;
4182}
4183
4184int cpu_inl(CPUState *env, int addr)
4185{
4186 uint32_t u32 = 0;
4187 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4188 if (RT_LIKELY(rc == VINF_SUCCESS))
4189 {
4190//if (addr==0x01f0 && u32 == 0x6b6d)
4191// loglevel = ~0;
4192 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4193 return (int)u32;
4194 }
4195 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4196 {
4197 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4198 remR3RaiseRC(env->pVM, rc);
4199 return (int)u32;
4200 }
4201 remAbort(rc, __FUNCTION__);
4202 return 0xffffffff;
4203}
4204
4205#undef LOG_GROUP
4206#define LOG_GROUP LOG_GROUP_REM
4207
4208
4209/* -+- helpers and misc other interfaces -+- */
4210
4211/**
4212 * Perform the CPUID instruction.
4213 *
4214 * ASMCpuId cannot be invoked from some source files where this is used because of global
4215 * register allocations.
4216 *
4217 * @param env Pointer to the recompiler CPU structure.
4218 * @param uOperator CPUID operation (eax).
4219 * @param pvEAX Where to store eax.
4220 * @param pvEBX Where to store ebx.
4221 * @param pvECX Where to store ecx.
4222 * @param pvEDX Where to store edx.
4223 */
4224void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
4225{
4226 CPUMGetGuestCpuId(env->pVM, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
4227}
4228
4229
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * NOTE(review): dead code (compiled out by the #if 0); kept for reference.
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4259
4260/**
4261 * Interface for the qemu cpu to report unhandled situation
4262 * raising a fatal VM error.
4263 */
4264void cpu_abort(CPUState *env, const char *pszFormat, ...)
4265{
4266 va_list args;
4267 PVM pVM;
4268
4269 /*
4270 * Bitch about it.
4271 */
4272#ifndef _MSC_VER
4273 /** @todo: MSVC is right - it's not valid C */
4274 RTLogFlags(NULL, "nodisabled nobuffered");
4275#endif
4276 va_start(args, pszFormat);
4277 RTLogPrintf("fatal error in recompiler cpu: %N\n", pszFormat, &args);
4278 va_end(args);
4279 va_start(args, pszFormat);
4280 AssertReleaseMsgFailed(("fatal error in recompiler cpu: %N\n", pszFormat, &args));
4281 va_end(args);
4282
4283 /*
4284 * If we're in REM context we'll sync back the state before 'jumping' to
4285 * the EMs failure handling.
4286 */
4287 pVM = cpu_single_env->pVM;
4288 if (pVM->rem.s.fInREM)
4289 REMR3StateBack(pVM);
4290 EMR3FatalError(pVM, VERR_REM_VIRTUAL_CPU_ERROR);
4291 AssertMsgFailed(("EMR3FatalError returned!\n"));
4292}
4293
4294
4295/**
4296 * Aborts the VM.
4297 *
4298 * @param rc VBox error code.
4299 * @param pszTip Hint about why/when this happend.
4300 */
4301void remAbort(int rc, const char *pszTip)
4302{
4303 PVM pVM;
4304
4305 /*
4306 * Bitch about it.
4307 */
4308 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4309 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4310
4311 /*
4312 * Jump back to where we entered the recompiler.
4313 */
4314 pVM = cpu_single_env->pVM;
4315 if (pVM->rem.s.fInREM)
4316 REMR3StateBack(pVM);
4317 EMR3FatalError(pVM, rc);
4318 AssertMsgFailed(("EMR3FatalError returned!\n"));
4319}
4320
4321
/**
 * Dumps a linux system call.
 *
 * Looks up EAX in a static i386 Linux syscall-number -> name table and logs
 * the call with its register arguments (Linux i386 passes args in
 * ebx/ecx/edx/esi/edi/ebp).
 *
 * @param   pVM     VM handle.
 */
void remR3DumpLnxSyscall(PVM pVM)
{
    /* i386 Linux syscall table; index == syscall number in EAX. */
    static const char *apsz[] =
    {
        "sys_restart_syscall",  /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",             /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",           /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",            /* 15 */
        "sys_lchown16",
        "sys_ni_syscall",       /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",           /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",            /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",            /* 30 */
        "sys_ni_syscall",       /* old stty syscall holder */
        "sys_ni_syscall",       /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",       /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",            /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",       /* old prof syscall holder */
        "sys_brk",              /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",        /* 50 */
        "sys_acct",
        "sys_umount",           /* recycled never used phys() */
        "sys_ni_syscall",       /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",            /* 55 */
        "sys_ni_syscall",       /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",       /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",            /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",          /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",       /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",        /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",      /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink",         /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",             /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16",         /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",       /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",          /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",        /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",             /* 110 */
        "sys_vhangup",
        "sys_ni_syscall",       /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",          /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",            /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect",         /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",       /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",       /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",            /* 135 */
        "sys_personality",
        "sys_ni_syscall",       /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",           /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",            /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",            /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",      /* 165 */
        "sys_vm86",
        "sys_ni_syscall",       /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",      /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",   /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",          /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",           /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",       /* reserved for streams1 */
        "sys_ni_syscall",       /* reserved for streams2 */
        "sys_vfork",            /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",           /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",           /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",        /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",        /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid",         /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",       /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",       /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",        /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",        /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",      /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",            /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup",         /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",        /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",        /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",    /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",    /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",           /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"        /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVM);
    /* switch kept (with only a default label) so per-syscall cases can be
       added easily when debugging. */
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), CPUMGetGuestEBX(pVM), CPUMGetGuestECX(pVM),
                     CPUMGetGuestEDX(pVM), CPUMGetGuestESI(pVM), CPUMGetGuestEDI(pVM), CPUMGetGuestEBP(pVM)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVM), uEAX, uEAX));
            break;

    }
}
4620
4621
/**
 * Dumps an OpenBSD system call.
 *
 * Looks up EAX in a static OpenBSD/i386 syscall-number -> name table and logs
 * the call together with up to 8 stack-passed arguments read from guest ESP.
 *
 * @param   pVM     VM handle.
 */
void remR3DumpOBsdSyscall(PVM pVM)
{
    /* OpenBSD/i386 syscall table; "SYS_<n>" entries are unassigned slots. */
    static const char *apsz[] =
    {
        "SYS_syscall",          //0
        "SYS_exit",             //1
        "SYS_fork",             //2
        "SYS_read",             //3
        "SYS_write",            //4
        "SYS_open",             //5
        "SYS_close",            //6
        "SYS_wait4",            //7
        "SYS_8",
        "SYS_link",             //9
        "SYS_unlink",           //10
        "SYS_11",
        "SYS_chdir",            //12
        "SYS_fchdir",           //13
        "SYS_mknod",            //14
        "SYS_chmod",            //15
        "SYS_chown",            //16
        "SYS_break",            //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid",           //20
        "SYS_mount",            //21
        "SYS_unmount",          //22
        "SYS_setuid",           //23
        "SYS_getuid",           //24
        "SYS_geteuid",          //25
        "SYS_ptrace",           //26
        "SYS_recvmsg",          //27
        "SYS_sendmsg",          //28
        "SYS_recvfrom",         //29
        "SYS_accept",           //30
        "SYS_getpeername",      //31
        "SYS_getsockname",      //32
        "SYS_access",           //33
        "SYS_chflags",          //34
        "SYS_fchflags",         //35
        "SYS_sync",             //36
        "SYS_kill",             //37
        "SYS_38",
        "SYS_getppid",          //39
        "SYS_40",
        "SYS_dup",              //41
        "SYS_opipe",            //42
        "SYS_getegid",          //43
        "SYS_profil",           //44
        "SYS_ktrace",           //45
        "SYS_sigaction",        //46
        "SYS_getgid",           //47
        "SYS_sigprocmask",      //48
        "SYS_getlogin",         //49
        "SYS_setlogin",         //50
        "SYS_acct",             //51
        "SYS_sigpending",       //52
        "SYS_osigaltstack",     //53
        "SYS_ioctl",            //54
        "SYS_reboot",           //55
        "SYS_revoke",           //56
        "SYS_symlink",          //57
        "SYS_readlink",         //58
        "SYS_execve",           //59
        "SYS_umask",            //60
        "SYS_chroot",           //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork",            //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk",             //69
        "SYS_sstk",             //70
        "SYS_61",
        "SYS_vadvise",          //72
        "SYS_munmap",           //73
        "SYS_mprotect",         //74
        "SYS_madvise",          //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore",          //78
        "SYS_getgroups",        //79
        "SYS_setgroups",        //80
        "SYS_getpgrp",          //81
        "SYS_setpgid",          //82
        "SYS_setitimer",        //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer",        //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2",             //90
        "SYS_91",
        "SYS_fcntl",            //92
        "SYS_select",           //93
        "SYS_94",
        "SYS_fsync",            //95
        "SYS_setpriority",      //96
        "SYS_socket",           //97
        "SYS_connect",          //98
        "SYS_99",
        "SYS_getpriority",      //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn",        //103
        "SYS_bind",             //104
        "SYS_setsockopt",       //105
        "SYS_listen",           //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend",       //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday",     //116
        "SYS_getrusage",        //117
        "SYS_getsockopt",       //118
        "SYS_119",
        "SYS_readv",            //120
        "SYS_writev",           //121
        "SYS_settimeofday",     //122
        "SYS_fchown",           //123
        "SYS_fchmod",           //124
        "SYS_125",
        "SYS_setreuid",         //126
        "SYS_setregid",         //127
        "SYS_rename",           //128
        "SYS_129",
        "SYS_130",
        "SYS_flock",            //131
        "SYS_mkfifo",           //132
        "SYS_sendto",           //133
        "SYS_shutdown",         //134
        "SYS_socketpair",       //135
        "SYS_mkdir",            //136
        "SYS_rmdir",            //137
        "SYS_utimes",           //138
        "SYS_139",
        "SYS_adjtime",          //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid",           //147
        "SYS_quotactl",         //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc",           //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh",            //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch",          //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread",            //173
        "SYS_pwrite",           //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid",           //181
        "SYS_setegid",          //182
        "SYS_seteuid",          //183
        "SYS_lfs_bmapv",        //184
        "SYS_lfs_markv",        //185
        "SYS_lfs_segclean",     //186
        "SYS_lfs_segwait",      //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf",         //191
        "SYS_fpathconf",        //192
        "SYS_swapctl",          //193
        "SYS_getrlimit",        //194
        "SYS_setrlimit",        //195
        "SYS_getdirentries",    //196
        "SYS_mmap",             //197
        "SYS___syscall",        //198
        "SYS_lseek",            //199
        "SYS_truncate",         //200
        "SYS_ftruncate",        //201
        "SYS___sysctl",         //202
        "SYS_mlock",            //203
        "SYS_munlock",          //204
        "SYS_205",
        "SYS_futimes",          //206
        "SYS_getpgid",          //207
        "SYS_xfspioctl",        //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget",           //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget",           //225
        "SYS_msgsnd",           //226
        "SYS_msgrcv",           //227
        "SYS_shmat",            //228
        "SYS_229",
        "SYS_shmdt",            //230
        "SYS_231",
        "SYS_clock_gettime",    //232
        "SYS_clock_settime",    //233
        "SYS_clock_getres",     //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep",        //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit",         //250
        "SYS_rfork",            //251
        "SYS_poll",             //252
        "SYS_issetugid",        //253
        "SYS_lchown",           //254
        "SYS_getsid",           //255
        "SYS_msync",            //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat",        //260
        "SYS_statfs",           //261
        "SYS_fstatfs",          //262
        "SYS_pipe",             //263
        "SYS_fhopen",           //264
        "SYS_265",
        "SYS_fhstatfs",         //266
        "SYS_preadv",           //267
        "SYS_pwritev",          //268
        "SYS_kqueue",           //269
        "SYS_kevent",           //270
        "SYS_mlockall",         //271
        "SYS_munlockall",       //272
        "SYS_getpeereid",       //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid",        //281
        "SYS_setresuid",        //282
        "SYS_getresgid",        //283
        "SYS_setresgid",        //284
        "SYS_285",
        "SYS_mquery",           //286
        "SYS_closefrom",        //287
        "SYS_sigaltstack",      //288
        "SYS_shmget",           //289
        "SYS_semop",            //290
        "SYS_stat",             //291
        "SYS_fstat",            //292
        "SYS_lstat",            //293
        "SYS_fhstat",           //294
        "SYS___semctl",         //295
        "SYS_shmctl",           //296
        "SYS_msgctl",           //297
        "SYS_MAXSYSCALL",       //298
        //299
        //300
    };
    uint32_t uEAX;
    /* Bail out early when logging is off: this dump reads guest memory. */
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVM);
    /* switch kept (with only a default label) so per-syscall cases can be
       added easily when debugging. */
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                /* OpenBSD passes syscall arguments on the stack. */
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVM, au32Args, CPUMGetGuestESP(pVM), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVM), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVM), uEAX, uEAX);
            break;
    }
}
4952
4953
4954#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 *
 * Only built for x86 Windows with IPRT_NO_CRT; always reports success.
 *
 * @returns true (success).
 * @param   hModule     Module handle (unused).
 * @param   dwReason    Attach/detach reason (unused).
 * @param   pvReserved  Reserved (unused).
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}
4962
/**
 * Minimal memcpy replacement for the no-CRT Windows/x86 build.
 *
 * Byte-wise copy; the regions must not overlap (standard memcpy contract).
 *
 * @returns dst.
 * @param   dst     Destination buffer.
 * @param   src     Source buffer.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src; /* keep const-correct; the original dropped the qualifier */
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
4970
4971#endif
4972
/** qemu callback invoked on SMM state changes; intentionally a no-op in the VBox REM. */
void cpu_smm_update(CPUState *env)
{
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette