VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/VMMAll/PATMAll.cpp@ 5070

Last change on this file since 5070 was 4071, checked in by vboxsync, 17 years ago

Biggest check-in ever. New source code headers for all (C) innotek files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 20.2 KB
Line 
1/* $Id: PATMAll.cpp 4071 2007-08-07 17:07:59Z vboxsync $ */
2/** @file
3 * PATM - The Patch Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PATM
22#include <VBox/patm.h>
23#include <VBox/cpum.h>
24#include <VBox/dis.h>
25#include <VBox/disopcode.h>
26#include <VBox/em.h>
27#include <VBox/err.h>
28#include <VBox/selm.h>
29#include <VBox/mm.h>
30#include "PATMInternal.h"
31#include <VBox/vm.h>
32#include "PATMA.h"
33
34#include <VBox/log.h>
35#include <iprt/assert.h>
36
37
/**
 * Load virtualized flags.
 *
 * This function is called from CPUMRawEnter(). It doesn't have to update the
 * IF and IOPL eflags bits, the caller will enforce those to set and 0 respectively.
 *
 * @param   pVM         VM handle.
 * @param   pCtxCore    The cpu context core.
 * @see     pg_raw
 */
PATMDECL(void) PATMRawEnter(PVM pVM, PCPUMCTXCORE pCtxCore)
{
    /* Are we about to resume inside the patch memory region? */
    bool fPatchCode = PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtxCore->eip);

    /*
     * Currently we don't bother to check whether PATM is enabled or not.
     * For all cases where it isn't, IOPL will be safe and IF will be set.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    /* Stash the guest-visible flag bits in the shared GC state structure. */
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
    AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%VGv eflags=%08x fPATM=%d pPATMGC=%VGv-%VGv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));

    /* fPIF (patch-instruction-finished) must be set unless we resume inside patch code. */
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%VGv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));

    /* Force IF on in the real eflags image so raw mode runs with interrupts enabled. */
    efl &= ~PATM_VIRTUAL_FLAGS_MASK;
    efl |= X86_EFL_IF;
    pCtxCore->eflags.u32 = efl;

#ifdef IN_RING3
#ifdef PATM_EMULATE_SYSENTER
    PCPUMCTX pCtx;
    int      rc;

    /* Check if the sysenter handler has changed. */
    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    AssertRC(rc);
    if (    rc == VINF_SUCCESS
        &&  pCtx->SysEnter.cs != 0
        &&  pCtx->SysEnter.eip != 0
       )
    {
        if (pVM->patm.s.pfnSysEnterGC != (RTGCPTR)pCtx->SysEnter.eip)
        {
            /* The guest changed SYSENTER_EIP; drop the cached patch information. */
            pVM->patm.s.pfnSysEnterPatchGC = 0;
            pVM->patm.s.pfnSysEnterGC = 0;

            Log2(("PATMRawEnter: installing sysenter patch for %VGv\n", pCtx->SysEnter.eip));
            pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
            if (pVM->patm.s.pfnSysEnterPatchGC == 0)
            {
                /* No patch exists for the new entry point; install one now. */
                rc = PATMR3InstallPatch(pVM, pCtx->SysEnter.eip, PATMFL_SYSENTER | PATMFL_CODE32);
                if (rc == VINF_SUCCESS)
                {
                    pVM->patm.s.pfnSysEnterPatchGC = PATMR3QueryPatchGCPtr(pVM, pCtx->SysEnter.eip);
                    pVM->patm.s.pfnSysEnterGC = (RTGCPTR)pCtx->SysEnter.eip;
                    Assert(pVM->patm.s.pfnSysEnterPatchGC);
                }
            }
            else
                pVM->patm.s.pfnSysEnterGC = (RTGCPTR)pCtx->SysEnter.eip;
        }
    }
    else
    {
        /* Sysenter MSRs are not set up by the guest; invalidate any cached patch. */
        pVM->patm.s.pfnSysEnterPatchGC = 0;
        pVM->patm.s.pfnSysEnterGC = 0;
    }
#endif
#endif
}
108
109
/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 ** @note Only here we are allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
 *
 * @param   pVM         VM handle.
 * @param   pCtxCore    The cpu context core.
 * @param   rawRC       Raw mode return code.
 * @see     @ref pg_raw
 */
PATMDECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
{
    /* Did we leave guest execution while inside the patch memory region? */
    bool fPatchCode = PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtxCore->eip);
    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    /* Fold the virtualized flag bits saved by PATMRawEnter back into the guest eflags image. */
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtxCore->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || VBOX_FAILURE(rawRC), ("Inconsistent state at %VGv rc=%Vrc\n", pCtxCore->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || VBOX_FAILURE(rawRC), ("fPIF=%d eip=%VGv rc=%Vrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC));

#ifdef IN_RING3
    if (   (efl & X86_EFL_IF)
        && fPatchCode
       )
    {
        /* PATM-specific leave codes are handled elsewhere; only attempt a
           switch-back to the original guest code for other return codes. */
        if (    rawRC < VINF_PATM_LEAVEGC_FIRST
            ||  rawRC > VINF_PATM_LEAVEGC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             *
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                /* Translate the patch-code eip back to the original guest instruction. */
                RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!PATMFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %VGv to %VGv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtxCore->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;  /* reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
                }
                else
                {
                    LogFlow(("Patch address %VGv can't be interrupted (state=%d)!\n", pCtxCore->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %VGv can't be interrupted (fPIF=%d)!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else /* !IN_RING3 */
    AssertMsgFailed(("!IN_RING3"));
#endif /* !IN_RING3 */

    if (!fPatchCode)
    {
        /* If interrupt inhibition was recorded for this exact eip, tell EM to
           delay interrupt delivery by one instruction (sti/mov ss shadow). */
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTGCPTR)pCtxCore->eip)
        {
            EMSetInhibitInterruptsPC(pVM, pCtxCore->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
205
206/**
207 * Get the EFLAGS.
208 * This is a worker for CPUMRawGetEFlags().
209 *
210 * @returns The eflags.
211 * @param pVM The VM handle.
212 * @param pCtxCore The context core.
213 */
214PATMDECL(uint32_t) PATMRawGetEFlags(PVM pVM, PCCPUMCTXCORE pCtxCore)
215{
216 uint32_t efl = pCtxCore->eflags.u32;
217 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
218 efl |= pVM->patm.s.CTXSUFF(pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK;
219 return efl;
220}
221
222/**
223 * Updates the EFLAGS.
224 * This is a worker for CPUMRawSetEFlags().
225 *
226 * @param pVM The VM handle.
227 * @param pCtxCore The context core.
228 * @param efl The new EFLAGS value.
229 */
230PATMDECL(void) PATMRawSetEFlags(PVM pVM, PCPUMCTXCORE pCtxCore, uint32_t efl)
231{
232 pVM->patm.s.CTXSUFF(pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
233 efl &= ~PATM_VIRTUAL_FLAGS_MASK;
234 efl |= X86_EFL_IF;
235 pCtxCore->eflags.u32 = efl;
236}
237
238/**
239 * Check if we must use raw mode (patch code being executed)
240 *
241 * @param pVM VM handle.
242 * @param pAddrGC Guest context address
243 */
244PATMDECL(bool) PATMShouldUseRawMode(PVM pVM, RTGCPTR pAddrGC)
245{
246 return ( PATMIsEnabled(pVM)
247 && ((pAddrGC >= pVM->patm.s.pPatchMemGC && pAddrGC < pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem))) ? true : false;
248}
249
250/**
251 * Returns the guest context pointer and size of the GC context structure
252 *
253 * @returns VBox status code.
254 * @param pVM The VM to operate on.
255 */
256PATMDECL(GCPTRTYPE(PPATMGCSTATE)) PATMQueryGCState(PVM pVM)
257{
258 return pVM->patm.s.pGCStateGC;
259}
260
261/**
262 * Checks whether the GC address is part of our patch region
263 *
264 * @returns VBox status code.
265 * @param pVM The VM to operate on.
266 * @param pAddrGC Guest context address
267 */
268PATMDECL(bool) PATMIsPatchGCAddr(PVM pVM, RTGCPTR pAddrGC)
269{
270 return (PATMIsEnabled(pVM) && pAddrGC >= pVM->patm.s.pPatchMemGC && pAddrGC < pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem) ? true : false;
271}
272
273/**
274 * Set parameters for pending MMIO patch operation
275 *
276 * @returns VBox status code.
277 * @param pDevIns Device instance.
278 * @param GCPhys MMIO physical address
279 * @param pCachedData GC pointer to cached data
280 */
281PATMDECL(int) PATMSetMMIOPatchInfo(PVM pVM, RTGCPHYS GCPhys, RTGCPTR pCachedData)
282{
283 pVM->patm.s.mmio.GCPhys = GCPhys;
284 pVM->patm.s.mmio.pCachedData = pCachedData;
285
286 return VINF_SUCCESS;
287}
288
289/**
290 * Checks if the interrupt flag is enabled or not.
291 *
292 * @returns true if it's enabled.
293 * @returns false if it's diabled.
294 *
295 * @param pVM The VM handle.
296 */
297PATMDECL(bool) PATMAreInterruptsEnabled(PVM pVM)
298{
299 PCPUMCTX pCtx = 0;
300 int rc;
301
302 rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
303 AssertRC(rc);
304
305 return PATMAreInterruptsEnabledByCtxCore(pVM, CPUMCTX2CORE(pCtx));
306}
307
308/**
309 * Checks if the interrupt flag is enabled or not.
310 *
311 * @returns true if it's enabled.
312 * @returns false if it's diabled.
313 *
314 * @param pVM The VM handle.
315 * @param pCtxCore CPU context
316 */
317PATMDECL(bool) PATMAreInterruptsEnabledByCtxCore(PVM pVM, PCPUMCTXCORE pCtxCore)
318{
319 if (PATMIsEnabled(pVM))
320 {
321 if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtxCore->eip))
322 return false;
323 }
324 return !!(pCtxCore->eflags.u32 & X86_EFL_IF);
325}
326
327/**
328 * Check if the instruction is patched as a duplicated function
329 *
330 * @returns patch record
331 * @param pVM The VM to operate on.
332 * @param pInstrGC Guest context point to the instruction
333 *
334 */
335PATMDECL(PPATMPATCHREC) PATMQueryFunctionPatch(PVM pVM, RTGCPTR pInstrGC)
336{
337 PPATMPATCHREC pRec;
338
339 pRec = (PPATMPATCHREC)RTAvloGCPtrGet(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, pInstrGC);
340 if ( pRec
341 && (pRec->patch.uState == PATCH_ENABLED)
342 && (pRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CALLABLE_AS_FUNCTION))
343 )
344 return pRec;
345 return 0;
346}
347
348/**
349 * Checks if the int 3 was caused by a patched instruction
350 *
351 * @returns VBox status
352 *
353 * @param pVM The VM handle.
354 * @param pInstrGC Instruction pointer
355 * @param pOpcode Original instruction opcode (out, optional)
356 * @param pSize Original instruction size (out, optional)
357 */
358PATMDECL(bool) PATMIsInt3Patch(PVM pVM, RTGCPTR pInstrGC, uint32_t *pOpcode, uint32_t *pSize)
359{
360 PPATMPATCHREC pRec;
361
362 pRec = (PPATMPATCHREC)RTAvloGCPtrGet(&CTXSUFF(pVM->patm.s.PatchLookupTree)->PatchTree, pInstrGC);
363 if ( pRec
364 && (pRec->patch.uState == PATCH_ENABLED)
365 && (pRec->patch.flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
366 )
367 {
368 if (pOpcode) *pOpcode = pRec->patch.opcode;
369 if (pSize) *pSize = pRec->patch.cbPrivInstr;
370 return true;
371 }
372 return false;
373}
374
/**
 * Emulate sysenter, sysexit and syscall instructions.
 *
 * @returns VBox status: VINF_SUCCESS when the instruction was emulated,
 *          VINF_EM_RAW_RING_SWITCH otherwise (fall back to a real ring switch).
 *
 * @param   pVM         The VM handle.
 * @param   pRegFrame   The relevant core context.
 * @param   pCpu        Disassembly context.
 */
PATMDECL(int) PATMSysCall(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
{
    PCPUMCTX pCtx;
    int      rc;

    rc = CPUMQueryGuestCtxPtr(pVM, &pCtx);
    AssertRCReturn(rc, VINF_EM_RAW_RING_SWITCH);

    if (pCpu->pCurInstr->opcode == OP_SYSENTER)
    {
        /* Only emulate when SYSENTER MSRs are set up, we're in ring 3 (not V86),
           a sysenter patch is installed for the current MSR value, and the
           virtualized IF is set. Otherwise let the ring switch happen. */
        if (    pCtx->SysEnter.cs == 0
            ||  pRegFrame->eflags.Bits.u1VM
            ||  (pRegFrame->cs & X86_SEL_RPL) != 3
            ||  pVM->patm.s.pfnSysEnterPatchGC == 0
            ||  pVM->patm.s.pfnSysEnterGC != (RTGCPTR)pCtx->SysEnter.eip
            ||  !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysenter from %VGv to %VGv\n", pRegFrame->eip, pVM->patm.s.pfnSysEnterPatchGC));
        /** @todo the base and limit are forced to 0 & 4G-1 resp. We assume the selector is wide open here. */
        /** @note The Intel manual suggests that the OS is responsible for this. */
        pRegFrame->cs          = (pCtx->SysEnter.cs & ~X86_SEL_RPL) | 1;  /* ring 1 in raw mode, not ring 0 */
        pRegFrame->eip         = /** @todo ugly conversion! */(uint32_t)pVM->patm.s.pfnSysEnterPatchGC;
        pRegFrame->ss          = pRegFrame->cs + 8;     /* SysEnter.cs + 8 */
        pRegFrame->esp         = pCtx->SysEnter.esp;
        pRegFrame->eflags.u32 &= ~(X86_EFL_VM|X86_EFL_RF);
        pRegFrame->eflags.u32 |= X86_EFL_IF;            /* real IF stays on; virtual IF cleared below */

        /* Turn off interrupts. */
        pVM->patm.s.CTXSUFF(pGCState)->uVMFlags &= ~X86_EFL_IF;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysEnter);

        return VINF_SUCCESS;
    }
    else
    if (pCpu->pCurInstr->opcode == OP_SYSEXIT)
    {
        /* Only emulate from ring 1 (raw-mode guest kernel), not V86 mode, with
           the virtualized IF set and SYSENTER MSRs configured. */
        if (    pCtx->SysEnter.cs == 0
            ||  (pRegFrame->cs & X86_SEL_RPL) != 1
            ||  pRegFrame->eflags.Bits.u1VM
            ||  !(PATMRawGetEFlags(pVM, pRegFrame) & X86_EFL_IF))
            goto end;

        Log2(("PATMSysCall: sysexit from %VGv to %VGv\n", pRegFrame->eip, pRegFrame->edx));

        /* Return to ring 3 at edx/ecx, per the sysexit register convention. */
        pRegFrame->cs          = ((pCtx->SysEnter.cs + 16) & ~X86_SEL_RPL) | 3;
        pRegFrame->eip         = pRegFrame->edx;
        pRegFrame->ss          = pRegFrame->cs + 8;     /* SysEnter.cs + 24 */
        pRegFrame->esp         = pRegFrame->ecx;

        STAM_COUNTER_INC(&pVM->patm.s.StatSysExit);

        return VINF_SUCCESS;
    }
    else
    if (pCpu->pCurInstr->opcode == OP_SYSCALL)
    {
        /** @todo implement syscall */
    }
    else
    if (pCpu->pCurInstr->opcode == OP_SYSRET)
    {
        /** @todo implement sysret */
    }

end:
    return VINF_EM_RAW_RING_SWITCH;
}
453
/**
 * Adds branch pair to the lookup cache of the particular branch instruction.
 *
 * @returns VBox status: VINF_SUCCESS, VERR_INVALID_PARAMETER when the jump
 *          table address is not inside the patch region, or
 *          VERR_INTERNAL_ERROR when no free slot was found despite the count
 *          saying one exists.
 * @param   pVM             The VM to operate on.
 * @param   pJumpTableGC    Pointer to branch instruction lookup cache.
 * @param   pBranchTarget   Original branch target.
 * @param   pRelBranchPatch Relative duplicated function address.
 */
PATMDECL(int) PATMAddBranchToLookupCache(PVM pVM, RTGCPTR pJumpTableGC, RTGCPTR pBranchTarget, RTGCUINTPTR pRelBranchPatch)
{
    PPATCHJUMPTABLE pJumpTable;

    Log(("PATMAddBranchToLookupCache: Adding (%VGv->%VGv (%VGv)) to table %VGv\n", pBranchTarget, pRelBranchPatch + pVM->patm.s.pPatchMemGC, pRelBranchPatch, pJumpTableGC));

    AssertReturn(PATMIsPatchGCAddr(pVM, pJumpTableGC), VERR_INVALID_PARAMETER);

#ifdef IN_GC
    /* In GC context the table can be accessed through its GC address directly. */
    pJumpTable = (PPATCHJUMPTABLE) pJumpTableGC;
#else
    /* In other contexts translate the GC address to the HC mapping of the patch memory. */
    pJumpTable = (PPATCHJUMPTABLE) (pJumpTableGC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemHC);
#endif
    Log(("Nr addresses = %d, insert pos = %d\n", pJumpTable->cAddresses, pJumpTable->ulInsertPos));
    if (pJumpTable->cAddresses < pJumpTable->nrSlots)
    {
        /* There is still a free slot; find the first empty one and fill it in. */
        uint32_t i;

        for (i=0;i<pJumpTable->nrSlots;i++)
        {
            if (pJumpTable->Slot[i].pInstrGC == 0)
            {
                pJumpTable->Slot[i].pInstrGC    = pBranchTarget;
                /* Relative address - eases relocation */
                pJumpTable->Slot[i].pRelPatchGC = pRelBranchPatch;
                pJumpTable->cAddresses++;
                break;
            }
        }
        /* cAddresses < nrSlots guarantees the loop above found an empty slot. */
        AssertReturn(i < pJumpTable->nrSlots, VERR_INTERNAL_ERROR);
#ifdef VBOX_WITH_STATISTICS
        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupInsert);
        if (pVM->patm.s.StatU32FunctionMaxSlotsUsed < i)
            pVM->patm.s.StatU32FunctionMaxSlotsUsed = i + 1;
#endif
    }
    else
    {
        /* Replace an old entry. */
        /** @todo replacement strategy isn't really bright. change to something better if required. */
        Assert(pJumpTable->ulInsertPos < pJumpTable->nrSlots);
        /* NOTE(review): the (nrSlots-1) masking below assumes nrSlots is a
           power of two; only even-ness is asserted here — confirm. */
        Assert((pJumpTable->nrSlots & 1) == 0);

        pJumpTable->ulInsertPos &= (pJumpTable->nrSlots-1);
        pJumpTable->Slot[pJumpTable->ulInsertPos].pInstrGC    = pBranchTarget;
        /* Relative address - eases relocation */
        pJumpTable->Slot[pJumpTable->ulInsertPos].pRelPatchGC = pRelBranchPatch;

        /* Advance the round-robin insert position, wrapping with the slot-count mask. */
        pJumpTable->ulInsertPos = (pJumpTable->ulInsertPos+1) & (pJumpTable->nrSlots-1);

        STAM_COUNTER_INC(&pVM->patm.s.StatFunctionLookupReplace);
    }

    return VINF_SUCCESS;
}
518
519
520/**
521 * Return the name of the patched instruction
522 *
523 * @returns instruction name
524 *
525 * @param opcode DIS instruction opcode
526 * @param fPatchFlags Patch flags
527 */
528PATMDECL(const char *) patmGetInstructionString(uint32_t opcode, uint32_t fPatchFlags)
529{
530 const char *pszInstr = NULL;
531
532 switch (opcode)
533 {
534 case OP_CLI:
535 pszInstr = "cli";
536 break;
537 case OP_PUSHF:
538 pszInstr = "pushf";
539 break;
540 case OP_POPF:
541 pszInstr = "popf";
542 break;
543 case OP_STR:
544 pszInstr = "str";
545 break;
546 case OP_LSL:
547 pszInstr = "lsl";
548 break;
549 case OP_LAR:
550 pszInstr = "lar";
551 break;
552 case OP_SGDT:
553 pszInstr = "sgdt";
554 break;
555 case OP_SLDT:
556 pszInstr = "sldt";
557 break;
558 case OP_SIDT:
559 pszInstr = "sidt";
560 break;
561 case OP_SMSW:
562 pszInstr = "smsw";
563 break;
564 case OP_VERW:
565 pszInstr = "verw";
566 break;
567 case OP_VERR:
568 pszInstr = "verr";
569 break;
570 case OP_CPUID:
571 pszInstr = "cpuid";
572 break;
573 case OP_JMP:
574 pszInstr = "jmp";
575 break;
576 case OP_JO:
577 pszInstr = "jo";
578 break;
579 case OP_JNO:
580 pszInstr = "jno";
581 break;
582 case OP_JC:
583 pszInstr = "jc";
584 break;
585 case OP_JNC:
586 pszInstr = "jnc";
587 break;
588 case OP_JE:
589 pszInstr = "je";
590 break;
591 case OP_JNE:
592 pszInstr = "jne";
593 break;
594 case OP_JBE:
595 pszInstr = "jbe";
596 break;
597 case OP_JNBE:
598 pszInstr = "jnbe";
599 break;
600 case OP_JS:
601 pszInstr = "js";
602 break;
603 case OP_JNS:
604 pszInstr = "jns";
605 break;
606 case OP_JP:
607 pszInstr = "jp";
608 break;
609 case OP_JNP:
610 pszInstr = "jnp";
611 break;
612 case OP_JL:
613 pszInstr = "jl";
614 break;
615 case OP_JNL:
616 pszInstr = "jnl";
617 break;
618 case OP_JLE:
619 pszInstr = "jle";
620 break;
621 case OP_JNLE:
622 pszInstr = "jnle";
623 break;
624 case OP_JECXZ:
625 pszInstr = "jecxz";
626 break;
627 case OP_LOOP:
628 pszInstr = "loop";
629 break;
630 case OP_LOOPNE:
631 pszInstr = "loopne";
632 break;
633 case OP_LOOPE:
634 pszInstr = "loope";
635 break;
636 case OP_MOV:
637 if (fPatchFlags & PATMFL_IDTHANDLER)
638 {
639 pszInstr = "mov (Int/Trap Handler)";
640 }
641 break;
642 case OP_SYSENTER:
643 pszInstr = "sysenter";
644 break;
645 case OP_PUSH:
646 pszInstr = "push (cs)";
647 break;
648 case OP_CALL:
649 pszInstr = "call";
650 break;
651 case OP_IRET:
652 pszInstr = "iret";
653 break;
654 }
655 return pszInstr;
656}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette