VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/VMMGC/PATMGC.cpp@ 1403

Last change on this file since 1403 was 1359, checked in by vboxsync, 18 years ago

SELM function changes for v86 mode code.
CPL check fixes for V86 mode code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 20.6 KB
/* $Id: PATMGC.cpp 1359 2007-03-09 10:40:44Z vboxsync $ */
/** @file
 * PATM - Dynamic Guest OS Patching Manager - Guest Context
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/patm.h>
#include <VBox/pgm.h>
#include <VBox/mm.h>
#include <VBox/sup.h>
#include <VBox/param.h>
#include <iprt/avl.h>
#include "PATMInternal.h"
#include "PATMA.h"
#include <VBox/vm.h>
#include <VBox/dbg.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/em.h>
#include <VBox/err.h>
#include <VBox/selm.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <stdlib.h>
#include <stdio.h>

/**
 * #PF Virtual Handler callback for guest access to a page monitored by PATM.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
PATMGCDECL(int) PATMGCMonitorPage(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, void *pvRange, uintptr_t offRange)
{
    pVM->patm.s.pvFaultMonitor = pvFault;
    return VINF_PATM_CHECK_PATCH_PAGE;
}

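/* Note: PATMGCMonitorPage above only records the faulting address and returns
 * VINF_PATM_CHECK_PATCH_PAGE; the actual inspection of the monitored patch page is
 * presumably left to the host context part of PATM once this status code propagates
 * back out of the trap handler, as nothing more can safely be done from guest context. */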

/**
 * Checks if the write is located on a page that was patched before.
 * (if so, then we are not allowed to turn on r/w)
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pRegFrame   CPU context.
 * @param   GCPtr       GC pointer to the write address.
 * @param   cbWrite     Number of bytes to write.
 *
 */
PATMGCDECL(int) PATMGCHandleWriteToPatchPage(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR GCPtr, uint32_t cbWrite)
{
    RTGCUINTPTR     pWritePageStart, pWritePageEnd;
    PPATMPATCHPAGE  pPatchPage;

    /* Quick boundary check */
    if (    GCPtr < pVM->patm.s.pPatchedInstrGCLowest
        ||  GCPtr > pVM->patm.s.pPatchedInstrGCHighest
       )
        return VERR_PATCH_NOT_FOUND;

    STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWriteDetect, a);

    pWritePageStart = (RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
    pWritePageEnd   = ((RTGCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
    pPatchPage = (PPATMPATCHPAGE)RTAvloGCPtrGet(CTXSUFF(&pVM->patm.s.PatchLookupTree)->PatchTreeByPage, (RTGCPTR)pWritePageStart);
    if (    !pPatchPage
        &&  pWritePageStart != pWritePageEnd
       )
    {
        pPatchPage = (PPATMPATCHPAGE)RTAvloGCPtrGet(CTXSUFF(&pVM->patm.s.PatchLookupTree)->PatchTreeByPage, (RTGCPTR)pWritePageEnd);
    }

#ifdef LOG_ENABLED
    if (pPatchPage)
        Log(("PATMIsWriteToPatchPage: Found page %VGv for write to %VGv %d bytes\n", pPatchPage->Core.Key, GCPtr, cbWrite));
#endif

    if (pPatchPage)
    {
        if (    pPatchPage->pLowestAddrGC  > (RTGCPTR)((RTGCUINTPTR)GCPtr + cbWrite - 1)
            ||  pPatchPage->pHighestAddrGC < GCPtr)
        {
            /* This part of the page was not patched; try to emulate the instruction. */
            uint32_t cb;

            LogFlow(("PATMHandleWriteToPatchPage: Interpret %VGv accessing %VGv\n", pRegFrame->eip, GCPtr));
            int rc = EMInterpretInstruction(pVM, pRegFrame, GCPtr, &cb);
            if (rc == VINF_SUCCESS)
            {
                STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpreted);
                STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
                return VINF_SUCCESS;
            }
            STAM_COUNTER_INC(&pVM->patm.s.StatPatchWriteInterpretedFailed);
        }
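
        /* Either the write overlaps patched code or interpreting it above failed. Record the
         * event against every patch registered for this page; the invalid write counters are
         * presumably inspected elsewhere in PATM to decide when a patch should be disabled or
         * refreshed. The caller is then asked to emulate the instruction. */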
        HCPTRTYPE(PPATCHINFO) *paPatch = (HCPTRTYPE(PPATCHINFO) *)MMHyperHC2GC(pVM, pPatchPage->aPatch);

        /* Increase the invalid write counter for each patch that's registered for that page. */
        for (uint32_t i = 0; i < pPatchPage->cCount; i++)
        {
            PPATCHINFO pPatch = (PPATCHINFO)MMHyperHC2GC(pVM, paPatch[i]);

            pPatch->cInvalidWrites++;
        }

        STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWriteDetect, a);
    return VERR_PATCH_NOT_FOUND;
}


/**
 * Checks if the illegal instruction was caused by a patched instruction.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pRegFrame   The relevant core context.
 */
PATMDECL(int) PATMGCHandleIllegalInstrTrap(PVM pVM, PCPUMCTXCORE pRegFrame)
{
    PPATMPATCHREC pRec;
    int rc;

    /* Very important check -> otherwise we have a security leak. */
    AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss & X86_SEL_RPL) == 1, VERR_ACCESS_DENIED);
    Assert(PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->eip));

    /* OP_ILLUD2 in PATM generated code? */
    if (CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
    {
        LogFlow(("PATMGC: Pending action %x at %VGv\n", CTXSUFF(pVM->patm.s.pGCState)->uPendingAction, pRegFrame->eip));

        /* Private PATM interface (@todo hack due to lack of anything generic). */
        /* Parameters:
         *      eax = Pending action (currently PATM_ACTION_LOOKUP_ADDRESS)
         *      ecx = PATM_ACTION_MAGIC
         */
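        /* Roughly speaking, a PATM-generated patch block that needs host assistance loads the
         * action code and its arguments into registers and then executes the illegal opcode
         * (OP_ILLUD2), which traps into this handler, e.g. (illustrative only, not taken from
         * the patch generator):
         *
         *      mov     eax, PATM_ACTION_LOOKUP_ADDRESS
         *      mov     ecx, PATM_ACTION_MAGIC
         *      mov     edx, <guest address to look up>
         *      mov     edi, <jump table address>
         *      <illegal instruction>           ; PATM_ILLEGAL_INSTR_SIZE bytes; traps here
         *
         * The PATM_ACTION_MAGIC value in ecx guards against the guest hitting the same illegal
         * opcode by accident while a pending action happens to be set. */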
        if (    (pRegFrame->eax & CTXSUFF(pVM->patm.s.pGCState)->uPendingAction)
            &&  pRegFrame->ecx == PATM_ACTION_MAGIC
           )
        {
            CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;

            switch (pRegFrame->eax)
            {
            case PATM_ACTION_LOOKUP_ADDRESS:
            {
                /* Parameters:
                 *      edx = GC address to find
                 *      edi = PATCHJUMPTABLE ptr
                 */
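                /* Resolve the guest address in edx to the corresponding function patch and,
                 * if the patch is enabled, store the relative patch address in the caller's
                 * jump table (edi) so that later calls presumably branch there directly
                 * instead of trapping into this handler again. On success the patch address
                 * is also returned in eax; eax = 0 tells the patch code the lookup failed. */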
                AssertMsg(!pRegFrame->edi || PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->edi), ("edi = %VGv\n", pRegFrame->edi));

                Log(("PATMGC: lookup %VGv jump table=%VGv\n", pRegFrame->edx, pRegFrame->edi));

                pRec = PATMQueryFunctionPatch(pVM, (RTGCPTR)(pRegFrame->edx));
                if (pRec)
                {
                    if (pRec->patch.uState == PATCH_ENABLED)
                    {
                        RTGCUINTPTR pRelAddr = pRec->patch.pPatchBlockOffset;   /* make it relative */
                        rc = PATMAddBranchToLookupCache(pVM, (RTGCPTR)pRegFrame->edi, (RTGCPTR)pRegFrame->edx, pRelAddr);
                        if (rc == VINF_SUCCESS)
                        {
                            pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                            pRegFrame->eax = pRelAddr;
                            STAM_COUNTER_INC(&pVM->patm.s.StatFunctionFound);
                            return VINF_SUCCESS;
                        }
                        AssertFailed();
                    }
                    else
                    {
                        pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                        pRegFrame->eax = 0;     /* make it fault */
                        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                        return VINF_SUCCESS;
                    }
                }
                else
                {
#if 0
                    if (pRegFrame->edx == 0x806eca98)
                    {
                        pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                        pRegFrame->eax = 0;     /* make it fault */
                        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                        return VINF_SUCCESS;
                    }
#endif
                    STAM_COUNTER_INC(&pVM->patm.s.StatFunctionNotFound);
                    return VINF_PATM_DUPLICATE_FUNCTION;
                }
            }

            case PATM_ACTION_DISPATCH_PENDING_IRQ:
                /* Parameters:
                 *      edi = GC address to jump to
                 */
                Log(("PATMGC: Dispatch pending interrupt; eip=%VGv->%VGv\n", pRegFrame->eip, pRegFrame->edi));

                /* Change EIP to the guest address the patch would normally jump to after setting IF. */
                pRegFrame->eip = pRegFrame->edi;

                Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                pRegFrame->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;

                pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                /* We are no longer executing PATM code; set PIF again. */
                pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                STAM_COUNTER_INC(&pVM->patm.s.StatCheckPendingIRQ);

                /* The caller will call trpmGCExitTrap, which will dispatch pending interrupts for us. */
                return VINF_SUCCESS;

            case PATM_ACTION_PENDING_IRQ_AFTER_IRET:
                /* Parameters:
                 *      edi = GC address to jump to
                 */
                Log(("PATMGC: Dispatch pending interrupt (iret); eip=%VGv->%VGv\n", pRegFrame->eip, pRegFrame->edi));
                Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX|PATM_RESTORE_EDI));
                Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                /* Change EIP to the guest address of the iret. */
                pRegFrame->eip = pRegFrame->edi;

                pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                pRegFrame->edi = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEDI;
                pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                /* We are no longer executing PATM code; set PIF again. */
                pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;

                return VINF_PATM_PENDING_IRQ_AFTER_IRET;

            case PATM_ACTION_DO_V86_IRET:
            {
                Log(("PATMGC: Do iret to V86 code; eip=%VGv\n", pRegFrame->eip));
                Assert(pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags == (PATM_RESTORE_EAX|PATM_RESTORE_ECX));
                Assert(pVM->patm.s.CTXSUFF(pGCState)->fPIF == 0);

                pRegFrame->eax = pVM->patm.s.CTXSUFF(pGCState)->Restore.uEAX;
                pRegFrame->ecx = pVM->patm.s.CTXSUFF(pGCState)->Restore.uECX;
                pVM->patm.s.CTXSUFF(pGCState)->Restore.uFlags = 0;

                rc = EMInterpretIret(pVM, pRegFrame);
                if (VBOX_SUCCESS(rc))
                {
                    STAM_COUNTER_INC(&pVM->patm.s.StatEmulIret);

                    /* We are no longer executing PATM code; set PIF again. */
                    pVM->patm.s.CTXSUFF(pGCState)->fPIF = 1;
                    CPUMGCCallV86Code(pRegFrame);
                    /* does not return */
                }
                else
                    STAM_COUNTER_INC(&pVM->patm.s.StatEmulIretFailed);
                return rc;
            }

#ifdef DEBUG
            case PATM_ACTION_LOG_CLI:
                Log(("PATMGC: CLI at %VGv (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                return VINF_SUCCESS;

            case PATM_ACTION_LOG_STI:
                Log(("PATMGC: STI at %VGv (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                return VINF_SUCCESS;

            case PATM_ACTION_LOG_POPF_IF1:
                Log(("PATMGC: POPF setting IF at %VGv (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                return VINF_SUCCESS;

            case PATM_ACTION_LOG_POPF_IF0:
                Log(("PATMGC: POPF at %VGv (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                return VINF_SUCCESS;

            case PATM_ACTION_LOG_PUSHF:
                Log(("PATMGC: PUSHF at %VGv (current IF=%d iopl=%d)\n", pRegFrame->eip, !!(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & X86_EFL_IF), X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags) ));
                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                return VINF_SUCCESS;

            case PATM_ACTION_LOG_IF1:
                Log(("PATMGC: IF=1 escape from %VGv\n", pRegFrame->eip));
                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                return VINF_SUCCESS;

            case PATM_ACTION_LOG_IRET:
            {
                char    *pIretFrame = (char *)pRegFrame->edx;
                uint32_t eip, selCS, uEFlags;

                rc  = MMGCRamRead(pVM, &eip,     pIretFrame, 4);
                rc |= MMGCRamRead(pVM, &selCS,   pIretFrame + 4, 4);
                rc |= MMGCRamRead(pVM, &uEFlags, pIretFrame + 8, 4);
                if (rc == VINF_SUCCESS)
                {
                    if (    (uEFlags & X86_EFL_VM)
                        ||  (selCS & X86_SEL_RPL) == 3)
                    {
                        uint32_t selSS, esp;

                        rc |= MMGCRamRead(pVM, &esp,   pIretFrame + 12, 4);
                        rc |= MMGCRamRead(pVM, &selSS, pIretFrame + 16, 4);

                        if (uEFlags & X86_EFL_VM)
                        {
                            uint32_t selDS, selES, selFS, selGS;
                            rc  = MMGCRamRead(pVM, &selES, pIretFrame + 20, 4);
                            rc |= MMGCRamRead(pVM, &selDS, pIretFrame + 24, 4);
                            rc |= MMGCRamRead(pVM, &selFS, pIretFrame + 28, 4);
                            rc |= MMGCRamRead(pVM, &selGS, pIretFrame + 32, 4);
                            if (rc == VINF_SUCCESS)
                            {
                                Log(("PATMGC: IRET->VM stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
                                Log(("PATMGC: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
                            }
                        }
                        else
                            Log(("PATMGC: IRET stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
                    }
                    else
                        Log(("PATMGC: IRET stack frame: return address %04X:%VGv eflags=%08x\n", selCS, eip, uEFlags));
                }
                Log(("PATMGC: IRET from %VGv (IF->1) current eflags=%x\n", pRegFrame->eip, pVM->patm.s.CTXSUFF(pGCState)->uVMFlags));
                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                return VINF_SUCCESS;
            }

            case PATM_ACTION_LOG_RET:
                Log(("PATMGC: RET to %VGv ESP=%VGv iopl=%d\n", pRegFrame->edx, pRegFrame->ebx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                return VINF_SUCCESS;

            case PATM_ACTION_LOG_CALL:
                Log(("PATMGC: CALL to %VGv return addr %VGv ESP=%VGv iopl=%d\n", pVM->patm.s.CTXSUFF(pGCState)->GCCallPatchTargetAddr, pVM->patm.s.CTXSUFF(pGCState)->GCCallReturnAddr, pRegFrame->edx, X86_EFL_GET_IOPL(pVM->patm.s.CTXSUFF(pGCState)->uVMFlags)));
                pRegFrame->eip += PATM_ILLEGAL_INSTR_SIZE;
                return VINF_SUCCESS;
#endif
            default:
                AssertFailed();
                break;
            }
        }
        else
            AssertFailed();
        CTXSUFF(pVM->patm.s.pGCState)->uPendingAction = 0;
    }
    AssertMsgFailed(("Unexpected OP_ILLUD2 in patch code at %VGv (pending action %x)!!!!\n", pRegFrame->eip, CTXSUFF(pVM->patm.s.pGCState)->uPendingAction));
    return VINF_EM_RAW_EMULATE_INSTR;
}

/**
 * Checks if the int 3 was caused by a patched instruction.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pRegFrame   The relevant core context.
 */
PATMDECL(int) PATMHandleInt3PatchTrap(PVM pVM, PCPUMCTXCORE pRegFrame)
{
    PPATMPATCHREC pRec;
    int rc;

    AssertReturn(!pRegFrame->eflags.Bits.u1VM && (pRegFrame->ss & X86_SEL_RPL) == 1, VERR_ACCESS_DENIED);

    /* Int 3 in PATM generated code? (most common case) */
    if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pRegFrame->eip))
    {
        /* @note hardcoded assumption about it being a single byte int 3 instruction. */
        pRegFrame->eip--;
        return VINF_PATM_PATCH_INT3;
    }

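    /* Otherwise this may be an int 3 that PATM wrote into the guest's own code as a patch.
     * eip already points past the single-byte int 3, so look up the patch record by the
     * address of the int 3 itself (eip - 1) and handle it according to the patch type. */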
    /** @todo could use simple caching here to speed things up. */
    pRec = (PPATMPATCHREC)RTAvloGCPtrGet(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, (RTGCPTR)(pRegFrame->eip - 1));  /* eip is pointing to the instruction *after* 'int 3' already */
    if (pRec && pRec->patch.uState == PATCH_ENABLED)
    {
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT_BLOCK)
        {
            Assert(pRec->patch.opcode == OP_CLI);
            /* This is a special cli block that was turned into an int 3 patch. We jump to the generated code manually. */
            pRegFrame->eip = (uint32_t)PATCHCODE_PTR_GC(&pRec->patch);
            STAM_COUNTER_INC(&pVM->patm.s.StatInt3BlockRun);
            return VINF_SUCCESS;
        }
        else
        if (pRec->patch.flags & PATMFL_INT3_REPLACEMENT)
        {
            uint32_t    size, cbOp;
            DISCPUSTATE cpu;

            /* eip is pointing to the instruction *after* 'int 3' already */
            pRegFrame->eip = pRegFrame->eip - 1;

            PATM_STAT_RUN_INC(&pRec->patch);

            Log(("PATMHandleInt3PatchTrap found int3 for %s at %VGv\n", patmGetInstructionString(pRec->patch.opcode, 0), pRegFrame->eip));

            switch (pRec->patch.opcode)
            {
            case OP_CPUID:
            case OP_IRET:
                break;

            case OP_STR:
            case OP_SGDT:
            case OP_SLDT:
            case OP_SIDT:
            case OP_LSL:
            case OP_LAR:
            case OP_SMSW:
            case OP_VERW:
            case OP_VERR:
            default:
                PATM_STAT_FAULT_INC(&pRec->patch);
                pRec->patch.cTraps++;
                return VINF_EM_RAW_EMULATE_INSTR;
            }

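            /* Only cpuid and iret replacements are interpreted here; everything else above is
             * handed back for full instruction emulation. The original instruction bytes were
             * presumably saved in aPrivInstr when the patch was installed, so disassemble and
             * interpret those and then advance eip past the replaced instruction. */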
            cpu.mode = SELMIsSelector32Bit(pVM, pRegFrame->eflags, pRegFrame->cs, 0) ? CPUMODE_32BIT : CPUMODE_16BIT;
            if (cpu.mode != CPUMODE_32BIT)
            {
                AssertFailed();
                return VINF_EM_RAW_EMULATE_INSTR;
            }
            rc = DISCoreOne(&cpu, (RTUINTPTR)&pRec->patch.aPrivInstr[0], &cbOp);
            if (VBOX_FAILURE(rc))
            {
                Log(("DISCoreOne failed with %Vrc\n", rc));
                PATM_STAT_FAULT_INC(&pRec->patch);
                pRec->patch.cTraps++;
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            rc = EMInterpretInstructionCPU(pVM, &cpu, pRegFrame, 0 /* not relevant here */, &size);
            if (rc != VINF_SUCCESS)
            {
                Log(("EMInterpretInstructionCPU failed with %Vrc\n", rc));
                PATM_STAT_FAULT_INC(&pRec->patch);
                pRec->patch.cTraps++;
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            pRegFrame->eip += cpu.opsize;
            return VINF_SUCCESS;
        }
    }
    return VERR_PATCH_NOT_FOUND;
}