VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PATMAll.cpp@ 60309

Last change on this file since 60309 was 58126, checked in by vboxsync, 9 years ago

VMM: Fixed almost all the Doxygen warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.6 KB
Line 
1/* $Id: PATMAll.cpp 58126 2015-10-08 20:59:48Z vboxsync $ */
2/** @file
3 * PATM - The Patch Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PATM
23#include <VBox/vmm/patm.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/em.h>
26#include <VBox/vmm/hm.h>
27#include <VBox/vmm/selm.h>
28#include <VBox/vmm/mm.h>
29#include "PATMInternal.h"
30#include <VBox/vmm/vm.h>
31#include <VBox/vmm/vmm.h>
32#include "PATMA.h"
33
34#include <VBox/dis.h>
35#include <VBox/disopcode.h>
36#include <VBox/err.h>
37#include <VBox/log.h>
38#include <iprt/assert.h>
39#include <iprt/string.h>
40
41
42/**
43 * @callback_method_impl{FNPGMPHYSHANDLER, PATM all access handler callback.}
44 *
45 * @remarks The @a pvUser argument is the base address of the page being
46 * monitored.
47 */
48PGM_ALL_CB2_DECL(VBOXSTRICTRC)
49patmVirtPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
50 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
51{
52 Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
53 NOREF(pvPtr); NOREF(pvBuf); NOREF(cbBuf); NOREF(enmOrigin); NOREF(pvUser);
54
55 Assert(pvUser);
56 Assert(!((uintptr_t)pvUser & PAGE_OFFSET_MASK));
57 Assert(((uintptr_t)pvUser + (GCPtr & PAGE_OFFSET_MASK)) == GCPtr);
58
59 pVM->patm.s.pvFaultMonitor = (RTRCPTR)GCPtr;
60#ifdef IN_RING3
61 PATMR3HandleMonitoredPage(pVM);
62 return VINF_PGM_HANDLER_DO_DEFAULT;
63#else
64 /* RC: Go handle this in ring-3. */
65 return VINF_PATM_CHECK_PATCH_PAGE;
66#endif
67}
68
69
70/**
71 * Load virtualized flags.
72 *
73 * This function is called from CPUMRawEnter(). It doesn't have to update the
74 * IF and IOPL eflags bits, the caller will enforce those to set and 0 respectively.
75 *
76 * @param pVM The cross context VM structure.
77 * @param pCtx The cpu context.
78 * @see pg_raw
79 */
80VMM_INT_DECL(void) PATMRawEnter(PVM pVM, PCPUMCTX pCtx)
81{
82 Assert(!HMIsEnabled(pVM));
83
84 /*
85 * Currently we don't bother to check whether PATM is enabled or not.
86 * For all cases where it isn't, IOPL will be safe and IF will be set.
87 */
88 uint32_t efl = pCtx->eflags.u32;
89 CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
90
91 AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtx->eip),
92 ("X86_EFL_IF is clear and PATM is disabled! (eip=%RRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n",
93 pCtx->eip, pCtx->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC,
94 pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));
95
96 AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || PATMIsPatchGCAddr(pVM, pCtx->eip),
97 ("fPIF=%d eip=%RRv\n", pVM->patm.s.CTXSUFF(pGCState)->fPIF, pCtx->eip));
98
99 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
100 efl |= X86_EFL_IF;
101 pCtx->eflags.u32 = efl;
102
103#ifdef IN_RING3
104# ifdef PATM_EMULATE_SYSENTER
105 PCPUMCTX pCtx;
106
107 /* Check if the sysenter handler has changed. */
108 pCtx = CPUMQueryGuestCtxPtr(pVM);
109 if ( pCtx->SysEnter.cs != 0
110 && pCtx->SysEnter.eip != 0
111 )
112 {
113 if (pVM->patm.s.pfnSysEnterGC != (RTRCPTR)pCtx->SysEnter.eip)
114 {
115 pVM->patm.s.pfnSysEnterPatchGC = 0;
116 pVM->patm.s.pfnSysEnterGC = 0;
117
118 Log2(("PATMRawEnter: installing sysenter patch for %RRv\n", pCtx->SysEnter.eip));
119 pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
120 if (pVM->patm.s.pfnSysEnterPatchGC == 0)
121 {
122 rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
123 if (rc == VINF_SUCCESS)
124 {
125 pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
126 pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
127 Assert(pVM->patm.s.pfnSysEnterPatchGC);
128 }
129 }
130 else
131 pVM->patm.s.pfnSysEnterGC = (RTRCPTR)pCtx->SysEnter.eip;
132 }
133 }
134 else
135 {
136 pVM->patm.s.pfnSysEnterPatchGC = 0;
137 pVM->patm.s.pfnSysEnterGC = 0;
138 }
139# endif /* PATM_EMULATE_SYSENTER */
140#endif
141}
142
143
144/**
145 * Restores virtualized flags.
146 *
147 * This function is called from CPUMRawLeave(). It will update the eflags register.
148 *
 * @note Only here are we allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
150 *
151 * @param pVM The cross context VM structure.
152 * @param pCtx The cpu context.
153 * @param rawRC Raw mode return code
154 * @see @ref pg_raw
155 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTX pCtx, int rawRC)
{
    Assert(!HMIsEnabled(pVM));
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtx->eip);

    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    /* Fold the virtualized flag bits kept in the GC state back into the guest
       EFLAGS, then reset the virtual IF for the next raw-mode entry. */
    uint32_t efl = pCtx->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtx->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC), ("Inconsistent state at %RRv rc=%Rrc\n", pCtx->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC), ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtx->eip, rawRC));

#ifdef IN_RING3
    /* Attempt to switch from patch code back to the original guest
       instruction, unless we're leaving with a PATM-specific status code. */
    if (   (efl & X86_EFL_IF)
        && fPatchCode)
    {
        if (   rawRC < VINF_PATM_LEAVE_RC_FIRST
            || rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             *
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtx->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtx->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0; /* reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n", pCtx->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n", pCtx->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else /* !IN_RING3 */
    /*
     * When leaving raw-mode state while IN_RC, it's generally for interpreting
     * a single original guest instruction.
     */
    AssertMsg(!fPatchCode, ("eip=%RRv\n", pCtx->eip));
#endif /* !IN_RING3 */

    if (!fPatchCode)
    {
        /* Back on original guest code: propagate a pending interrupt
           inhibition (sti / mov ss style) to EM, then clear it. */
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtx->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtx->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
244
245/**
246 * Get the EFLAGS.
247 * This is a worker for CPUMRawGetEFlags().
248 *
249 * @returns The eflags.
250 * @param pVM The cross context VM structure.
251 * @param pCtx The guest cpu context.
252 */
253VMM_INT_DECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTX pCtx)
254{
255 Assert(!HMIsEnabled(pVM));
256 uint32_t efl = pCtx->eflags.u32;
257 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
258 efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
259 return efl;
260}
261
262/**
263 * Updates the EFLAGS.
264 * This is a worker for CPUMRawSetEFlags().
265 *
266 * @param pVM The cross context VM structure.
267 * @param pCtx The guest cpu context.
268 * @param efl The new EFLAGS value.
269 */
270VMM_INT_DECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTX pCtx, uint32_t efl)
271{
272 Assert(!HMIsEnabled(pVM));
273 pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
274 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
275 efl |= X86_EFL_IF;
276 pCtx->eflags.u32 = efl;
277}
278
279/**
280 * Check if we must use raw mode (patch code being executed)
281 *
282 * @param pVM The cross context VM structure.
283 * @param pAddrGC Guest context address
284 */
285VMM_INT_DECL(bool) PATMShouldUseRawMode(PVM pVM, RTRCPTR pAddrGC)
286{
287 return PATMIsEnabled(pVM)
288 && ( (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem
289 || (RTRCUINTPTR)pAddrGC - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC < pVM->patm.s.cbPatchHelpers);
290}
291
292/**
293 * Returns the guest context pointer and size of the GC context structure
294 *
295 * @returns VBox status code.
296 * @param pVM The cross context VM structure.
297 */
298VMM_INT_DECL(RCPTRTYPE(PPATMGCSTATE)) PATMGetGCState(PVM pVM)
299{
300 AssertReturn(!HMIsEnabled(pVM), NIL_RTRCPTR);
301 return pVM->patm.s.pGCStateGC;
302}
303
304/**
305 * Checks whether the GC address is part of our patch or helper regions.
306 *
307 * @returns VBox status code.
308 * @param pVM The cross context VM structure.
309 * @param uGCAddr Guest context address.
310 * @internal
311 */
312VMMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTRCUINTPTR uGCAddr)
313{
314 return PATMIsEnabled(pVM)
315 && ( uGCAddr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem
316 || uGCAddr - (RTRCUINTPTR)pVM->patm.s.pbPatchHelpersRC < pVM->patm.s.cbPatchHelpers);
317}
318
319/**
320 * Checks whether the GC address is part of our patch region.
321 *
322 * @returns VBox status code.
323 * @param pVM The cross context VM structure.
324 * @param uGCAddr Guest context address.
325 * @internal
326 */
327VMMDECL(bool) PATMIsPatchGCAddrExclHelpers(PVM pVM, RTRCUINTPTR uGCAddr)
328{
329 return PATMIsEnabled(pVM)
330 && uGCAddr - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC < pVM->patm.s.cbPatchMem;
331}
332
333/**
334 * Reads patch code.
335 *
336 * @retval VINF_SUCCESS on success.
337 * @retval VERR_PATCH_NOT_FOUND if the request is entirely outside the patch
338 * code.
339 *
340 * @param pVM The cross context VM structure.
341 * @param GCPtrPatchCode The patch address to start reading at.
342 * @param pvDst Where to return the patch code.
343 * @param cbToRead Number of bytes to read.
344 * @param pcbRead Where to return the actual number of bytes we've
345 * read. Optional.
346 */
347VMM_INT_DECL(int) PATMReadPatchCode(PVM pVM, RTGCPTR GCPtrPatchCode, void *pvDst, size_t cbToRead, size_t *pcbRead)
348{
349 /* Shortcut. */
350 if (!PATMIsEnabled(pVM))
351 return VERR_PATCH_NOT_FOUND;
352 Assert(!HMIsEnabled(pVM));
353
354 /*
355 * Check patch code and patch helper code. We assume the requested bytes
356 * are not in either.
357 */
358 RTGCPTR offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pPatchMemGC;
359 if (offPatchCode >= pVM->patm.s.cbPatchMem)
360 {
361 offPatchCode = GCPtrPatchCode - (RTGCPTR32)pVM->patm.s.pbPatchHelpersRC;
362 if (offPatchCode >= pVM->patm.s.cbPatchHelpers)
363 return VERR_PATCH_NOT_FOUND;
364
365 /*
366 * Patch helper memory.
367 */
368 uint32_t cbMaxRead = pVM->patm.s.cbPatchHelpers - (uint32_t)offPatchCode;
369 if (cbToRead > cbMaxRead)
370 cbToRead = cbMaxRead;
371#ifdef IN_RC
372 memcpy(pvDst, pVM->patm.s.pbPatchHelpersRC + (uint32_t)offPatchCode, cbToRead);
373#else
374 memcpy(pvDst, pVM->patm.s.pbPatchHelpersR3 + (uint32_t)offPatchCode, cbToRead);
375#endif
376 }
377 else
378 {
379 /*
380 * Patch memory.
381 */
382 uint32_t cbMaxRead = pVM->patm.s.cbPatchMem - (uint32_t)offPatchCode;
383 if (cbToRead > cbMaxRead)
384 cbToRead = cbMaxRead;
385#ifdef IN_RC
386 memcpy(pvDst, pVM->patm.s.pPatchMemGC + (uint32_t)offPatchCode, cbToRead);
387#else
388 memcpy(pvDst, pVM->patm.s.pPatchMemHC + (uint32_t)offPatchCode, cbToRead);
389#endif
390 }
391
392 if (pcbRead)
393 *pcbRead = cbToRead;
394 return VINF_SUCCESS;
395}
396
397/**
398 * Set parameters for pending MMIO patch operation
399 *
400 * @returns VBox status code.
401 * @param pVM The cross context VM structure.
402 * @param GCPhys MMIO physical address.
403 * @param pCachedData RC pointer to cached data.
404 */
405VMM_INT_DECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTRCPTR pCachedData)
406{
407 if (!HMIsEnabled(pVM))
408 {
409 pVM->patm.s.mmio.GCPhys = GCPhys;
410 pVM->patm.s.mmio.pCachedData = (RTRCPTR)pCachedData;
411 }
412
413 return VINF_SUCCESS;
414}
415
416/**
417 * Checks if the interrupt flag is enabled or not.
418 *
419 * @returns true if it's enabled.
420 * @returns false if it's disabled.
421 *
422 * @param pVM The cross context VM structure.
423 * @todo CPUM should wrap this, EM.cpp shouldn't call us.
424 */
425VMM_INT_DECL(bool) PATMAreInterruptsEnabled(PVM pVM)
426{
427 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
428
429 return PATMAreInterruptsEnabledByCtx(pVM, pCtx);
430}
431
432/**
433 * Checks if the interrupt flag is enabled or not.
434 *
435 * @returns true if it's enabled.
436 * @returns false if it's disabled.
437 *
438 * @param pVM The cross context VM structure.
439 * @param pCtx The guest CPU context.
440 * @todo CPUM should wrap this, EM.cpp shouldn't call us.
441 */
442VMM_INT_DECL(bool) PATMAreInterruptsEnabledByCtx(PVM pVM, PCPUMCTX pCtx)
443{
444 if (PATMIsEnabled(pVM))
445 {
446 Assert(!HMIsEnabled(pVM));
447 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
448 return false;
449 }
450 return !!(pCtx->eflags.u32 & X86_EFL_IF);
451}
452
453/**
454 * Check if the instruction is patched as a duplicated function
455 *
456 * @returns patch record
457 * @param pVM The cross context VM structure.
458 * @param pInstrGC Guest context point to the instruction
459 *
460 */
461PPATMPATCHREC patmQueryFunctionPatch(PVM pVM, RTRCPTR pInstrGC)
462{
463 PPATMPATCHREC pRec;
464
465 AssertCompile(sizeof(AVLOU32KEY) == sizeof(pInstrGC));
466 pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
467 if ( pRec
468 && (pRec->patch.uState == PATCH_ENABLED)
469 && (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
470 )
471 return pRec;
472 return 0;
473}
474
475/**
476 * Checks if the int 3 was caused by a patched instruction
477 *
478 * @returns VBox status
479 *
480 * @param pVM The cross context VM structure.
481 * @param pInstrGC Instruction pointer
482 * @param pOpcode Original instruction opcode (out, optional)
483 * @param pSize Original instruction size (out, optional)
484 */
485VMM_INT_DECL(bool) PATMIsInt3Patch(PVM pVM, RTRCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
486{
487 PPATMPATCHREC pRec;
488 Assert(!HMIsEnabled(pVM));
489
490 pRec = (PPATMPATCHREC)RTAvloU32Get(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (AVLOU32KEY)pInstrGC);
491 if ( pRec
492 && (pRec->patch.uState == PATCH_ENABLED)
493 && (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
494 )
495 {
496 if (pOpcode) *pOpcode = pRec->patch.opcode;
497 if (pSize) *pSize = pRec->patch.cbPrivInstr;
498 return true;
499 }
500 return false;
501}
502
503/**
504 * Emulate sysenter, sysexit and syscall instructions
505 *
506 * @returns VBox status
507 *
508 * @param pVM The cross context VM structure.
509 * @param pCtx The relevant guest cpu context.
510 * @param pCpu Disassembly state.
511 */
VMMDECL(int) PATMSysCall(PVM pVM, PCPUMCTX pCtx, PDISCPUSTATE pCpu)
{
    Assert(CPUMQueryGuestCtxPtr(VMMGetCpu0(pVM)) == pCtx);
    AssertReturn(!HMIsEnabled(pVM), VERR_PATM_HM_IPE);

    if (pCpu->pCurInstr->uOpcode == OP_SYSENTER)
    {
        /* Only emulate when the guest comes from ring-3 outside V86 mode, the
           sysenter MSRs are set up, a sysenter patch is installed for the
           current handler, and interrupts are (virtually) enabled. */
        if (   pCtx->SysEnter.cs == 0
            || pCtx->eflags.Bits.u1VM
            || (pCtx->cs.Sel & X86_SEL_RPL) != 3
            || pVM->patm.s.pfnSysEnterPatchGC == 0
            || pVM->patm.s.pfnSysEnterGC != (RTRCPTR)(RTRCUINTPTR)pCtx->SysEnter.eip
            || !(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysenter from %RRv to %RRv\n", pCtx->eip, pVM->patm.s.pfnSysEnterPatchGC));
        /** @todo the base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
        /* Note: The Intel manual suggests that the OS is responsible for this. */
        /* Target ring-1 (raw-mode ring for guest ring-0) at the patch address. */
        pCtx->cs.Sel = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;
        pCtx->eip = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
        pCtx->ss.Sel = pCtx->cs.Sel + 8; /* SysEnter.cs + 8 */
        pCtx->esp = pCtx->SysEnter.esp;
        pCtx->eflags.u32 &= ~(X86_EFL_VM | X86_EFL_RF);
        pCtx->eflags.u32 |= X86_EFL_IF;

        /* Turn off interrupts. */
        pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSEXIT)
    {
        /* Only emulate when executed from ring-1 (raw-mode guest ring-0)
           outside V86 mode with interrupts (virtually) enabled. */
        if (   pCtx->SysEnter.cs == 0
            || (pCtx->cs.Sel & X86_SEL_RPL) != 1
            || pCtx->eflags.Bits.u1VM
            || !(PATMRawGetEFlags(pVM, pCtx) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysexit from %RRv to %RRv\n", pCtx->eip, pCtx->edx));

        /* Return to ring-3: edx holds the return eip, ecx the return esp. */
        pCtx->cs.Sel = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
        pCtx->eip = pCtx->edx;
        pCtx->ss.Sel = pCtx->cs.Sel + 8; /* SysEnter.cs + 24 */
        pCtx->esp = pCtx->ecx;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);

        return VINF_SUCCESS;
    }
    if (pCpu->pCurInstr->uOpcode == OP_SYSCALL)
    {
        /** @todo implement syscall */
    }
    else
    if (pCpu->pCurInstr->uOpcode == OP_SYSRET)
    {
        /** @todo implement sysret */
    }

end:
    /* Not handled here; let the caller perform a full ring switch. */
    return VINF_EM_RAW_RING_SWITCH;
}
576
577/**
578 * Adds branch pair to the lookup cache of the particular branch instruction
579 *
580 * @returns VBox status
581 * @param pVM The cross context VM structure.
582 * @param pJumpTableGC Pointer to branch instruction lookup cache
583 * @param pBranchTarget Original branch target
584 * @param pRelBranchPatch Relative duplicated function address
585 */
int patmAddBranchToLookupCache(PVM pVM, RTRCPTR pJumpTableGC, RTRCPTR pBranchTarget, RTRCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;

    Log(("PATMAddBranchToLookupCache: Adding (%RRv->%RRv (%RRv)) to table %RRv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));

    AssertReturn(PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pJumpTableGC), VERR_INVALID_PARAMETER);

    /* Translate the table address for the current context: directly usable in
       RC, converted from patch GC address to the HC mapping in ring-3. */
#ifdef IN_RC
    pJumpTable = (PPATCHJUMPTABLE) pJumpTableGC;
#else
    pJumpTable = (PPATCHJUMPTABLE) (pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        /* A free slot exists: take the first empty one. */
        uint32_t i;

        for (i=0;i<pJumpTable->nrSlots;i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC = pBranchTarget;
                /* Relative address - eases relocation */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Replace an old entry. */
        /** @todo replacement strategy isn't really bright. change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        Assert((pJumpTable->nrSlots & 1) == 0);

        /* Round-robin replacement.  The masking assumes nrSlots is a power of
           two, though only evenness is asserted above — TODO confirm. */
        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots-1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC = pBranchTarget;
        /* Relative address - eases relocation */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;

        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos+1) & (pJumpTable->nrSlots-1);

        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }

    return VINF_SUCCESS;
}
641
642
643#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
644/**
645 * Return the name of the patched instruction
646 *
647 * @returns instruction name
648 *
649 * @param opcode DIS instruction opcode
650 * @param fPatchFlags Patch flags
651 */
652const char *patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags)
653{
654 const char *pszInstr = NULL;
655
656 switch (opcode)
657 {
658 case OP_CLI:
659 pszInstr = "cli";
660 break;
661 case OP_PUSHF:
662 pszInstr = "pushf";
663 break;
664 case OP_POPF:
665 pszInstr = "popf";
666 break;
667 case OP_STR:
668 pszInstr = "str";
669 break;
670 case OP_LSL:
671 pszInstr = "lsl";
672 break;
673 case OP_LAR:
674 pszInstr = "lar";
675 break;
676 case OP_SGDT:
677 pszInstr = "sgdt";
678 break;
679 case OP_SLDT:
680 pszInstr = "sldt";
681 break;
682 case OP_SIDT:
683 pszInstr = "sidt";
684 break;
685 case OP_SMSW:
686 pszInstr = "smsw";
687 break;
688 case OP_VERW:
689 pszInstr = "verw";
690 break;
691 case OP_VERR:
692 pszInstr = "verr";
693 break;
694 case OP_CPUID:
695 pszInstr = "cpuid";
696 break;
697 case OP_JMP:
698 pszInstr = "jmp";
699 break;
700 case OP_JO:
701 pszInstr = "jo";
702 break;
703 case OP_JNO:
704 pszInstr = "jno";
705 break;
706 case OP_JC:
707 pszInstr = "jc";
708 break;
709 case OP_JNC:
710 pszInstr = "jnc";
711 break;
712 case OP_JE:
713 pszInstr = "je";
714 break;
715 case OP_JNE:
716 pszInstr = "jne";
717 break;
718 case OP_JBE:
719 pszInstr = "jbe";
720 break;
721 case OP_JNBE:
722 pszInstr = "jnbe";
723 break;
724 case OP_JS:
725 pszInstr = "js";
726 break;
727 case OP_JNS:
728 pszInstr = "jns";
729 break;
730 case OP_JP:
731 pszInstr = "jp";
732 break;
733 case OP_JNP:
734 pszInstr = "jnp";
735 break;
736 case OP_JL:
737 pszInstr = "jl";
738 break;
739 case OP_JNL:
740 pszInstr = "jnl";
741 break;
742 case OP_JLE:
743 pszInstr = "jle";
744 break;
745 case OP_JNLE:
746 pszInstr = "jnle";
747 break;
748 case OP_JECXZ:
749 pszInstr = "jecxz";
750 break;
751 case OP_LOOP:
752 pszInstr = "loop";
753 break;
754 case OP_LOOPNE:
755 pszInstr = "loopne";
756 break;
757 case OP_LOOPE:
758 pszInstr = "loope";
759 break;
760 case OP_MOV:
761 if (fPatchFlags & PATMFL_IDTHANDLER)
762 pszInstr = "mov (Int/Trap Handler)";
763 else
764 pszInstr = "mov (cs)";
765 break;
766 case OP_SYSENTER:
767 pszInstr = "sysenter";
768 break;
769 case OP_PUSH:
770 pszInstr = "push (cs)";
771 break;
772 case OP_CALL:
773 pszInstr = "call";
774 break;
775 case OP_IRET:
776 pszInstr = "iret";
777 break;
778 }
779 return pszInstr;
780}
781#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette