VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 38677

Last change on this file since 38677 was 38677, checked in by vboxsync, 13 years ago

IOM: MMIO instruction interpreter bug fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 75.9 KB
Line 
1/* $Id: IOMAllMMIO.cpp 38677 2011-09-07 14:45:22Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
32# include <VBox/vmm/iem.h>
33#endif
34#include "IOMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/hwaccm.h>
38#include "IOMInline.h"
39
40#include <VBox/dis.h>
41#include <VBox/disopcode.h>
42#include <VBox/vmm/pdmdev.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <iprt/assert.h>
46#include <VBox/log.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50
51/*******************************************************************************
52* Global Variables *
53*******************************************************************************/
54
55/**
56 * Array for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
57 */
58static const unsigned g_aSize2Shift[] =
59{
60 ~0, /* 0 - invalid */
61 0, /* *1 == 2^0 */
62 1, /* *2 == 2^1 */
63 ~0, /* 3 - invalid */
64 2, /* *4 == 2^2 */
65 ~0, /* 5 - invalid */
66 ~0, /* 6 - invalid */
67 ~0, /* 7 - invalid */
68 3 /* *8 == 2^3 */
69};
70
71/**
72 * Macro for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
73 */
74#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
75
76
77/**
78 * Wrapper which does the write and updates range statistics when such are enabled.
79 * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
80 */
81static int iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
82{
83#ifdef VBOX_WITH_STATISTICS
84 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
85 Assert(pStats);
86#endif
87
88 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
89 int rc;
90 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
91 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
92 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
93 else
94 rc = VINF_SUCCESS;
95 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
96 STAM_COUNTER_INC(&pStats->Accesses);
97 return rc;
98}
99
100
101/**
102 * Wrapper which does the read and updates range statistics when such are enabled.
103 */
104DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
105{
106#ifdef VBOX_WITH_STATISTICS
107 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
108 Assert(pStats);
109#endif
110
111 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
112 int rc;
113 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
114 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
115 else
116 rc = VINF_IOM_MMIO_UNUSED_FF;
117 if (rc != VINF_SUCCESS)
118 {
119 switch (rc)
120 {
121 case VINF_IOM_MMIO_UNUSED_FF:
122 switch (cbValue)
123 {
124 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
125 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
126 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
127 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
128 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
129 }
130 rc = VINF_SUCCESS;
131 break;
132
133 case VINF_IOM_MMIO_UNUSED_00:
134 switch (cbValue)
135 {
136 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
137 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
138 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
139 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
140 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
141 }
142 rc = VINF_SUCCESS;
143 break;
144 }
145 }
146 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
147 STAM_COUNTER_INC(&pStats->Accesses);
148 return rc;
149}
150
151
152/**
153 * Internal - statistics only.
154 */
155DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
156{
157#ifdef VBOX_WITH_STATISTICS
158 switch (cb)
159 {
160 case 1:
161 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
162 break;
163 case 2:
164 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
165 break;
166 case 4:
167 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
168 break;
169 case 8:
170 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
171 break;
172 default:
173 /* No way. */
174 AssertMsgFailed(("Invalid data length %d\n", cb));
175 break;
176 }
177#else
178 NOREF(pVM); NOREF(cb);
179#endif
180}
181
182
183/**
184 * MOV reg, mem (read)
185 * MOVZX reg, mem (read)
186 * MOVSX reg, mem (read)
187 *
188 * @returns VBox status code.
189 *
190 * @param pVM The virtual machine.
191 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
192 * @param pCpu Disassembler CPU state.
193 * @param pRange Pointer MMIO range.
194 * @param GCPhysFault The GC physical address corresponding to pvFault.
195 */
196static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
197{
198 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
199
200 /*
201 * Get the data size from parameter 2,
202 * and call the handler function to get the data.
203 */
204 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
205 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
206
207 uint64_t u64Data = 0;
208 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
209 if (rc == VINF_SUCCESS)
210 {
211 /*
212 * Do sign extension for MOVSX.
213 */
214 /** @todo checkup MOVSX implementation! */
215 if (pCpu->pCurInstr->opcode == OP_MOVSX)
216 {
217 if (cb == 1)
218 {
219 /* DWORD <- BYTE */
220 int64_t iData = (int8_t)u64Data;
221 u64Data = (uint64_t)iData;
222 }
223 else
224 {
225 /* DWORD <- WORD */
226 int64_t iData = (int16_t)u64Data;
227 u64Data = (uint64_t)iData;
228 }
229 }
230
231 /*
232 * Store the result to register (parameter 1).
233 */
234 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, u64Data);
235 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
236 }
237
238 if (rc == VINF_SUCCESS)
239 iomMMIOStatLength(pVM, cb);
240 return rc;
241}
242
243
244/**
245 * MOV mem, reg|imm (write)
246 *
247 * @returns VBox status code.
248 *
249 * @param pVM The virtual machine.
250 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
251 * @param pCpu Disassembler CPU state.
252 * @param pRange Pointer MMIO range.
253 * @param GCPhysFault The GC physical address corresponding to pvFault.
254 */
255static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
256{
257 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
258
259 /*
260 * Get data to write from second parameter,
261 * and call the callback to write it.
262 */
263 unsigned cb = 0;
264 uint64_t u64Data = 0;
265 bool fRc = iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &u64Data, &cb);
266 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
267
268 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
269 if (rc == VINF_SUCCESS)
270 iomMMIOStatLength(pVM, cb);
271 return rc;
272}
273
274
/** Wrapper for reading virtual memory.
 *
 * In raw-mode context the no-trap MM reader is used; in ring-3/ring-0 the
 * read goes through PGM using the guest virtual address.
 */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    return MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
#else
    return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
#endif
}
286
287
/** Wrapper for writing virtual memory.
 *
 * In ring-0 and raw-mode context the write bypasses access handlers (no trap
 * is raised); in ring-3 it goes through PGM.  pCtxCore is only needed by the
 * R0/RC path.
 */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code. Some thought needs to be spent on theoretical concurrency issues as
     *        as well since we're not behind the pgm lock and handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    NOREF(pCtxCore);
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
#endif
}
304
305
#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 * NOTE: This whole function is compiled out (the && 0 above); it is kept for
 * reference only and is known to have buggy ECX handling.
 *
 * @returns VBox status code.
 *
 * @param   pVM          The virtual machine.
 * @param   fWriteAccess Set if the faulting access was a write (mem -> MMIO),
 *                       clear for a read (MMIO -> mem).
 * @param   pRegFrame    Trap register frame.
 * @param   GCPhysFault  The GC physical address corresponding to pvFault.
 * @param   pCpu         Disassembler CPU state.
 * @param   pRange       Pointer MMIO range.
 * @param   ppStat       Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
                            PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
        return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->prefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* A 64-bit REP count of 4G or more cannot be handled here; punt to the recompiler. */
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        /* With a 16-bit address size only CX is the counter. */
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    /* DF decides whether rSI/rDI advance or retreat. */
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_HC_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->prefix & PREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_HC_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_HC_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_HC_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
            {
                /* Destination range only handled in ring-3; release and defer. */
                iomMmioReleaseRange(pVM, pRange);
                return VINF_IOM_HC_MMIO_READ_WRITE;
            }

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
            iomMmioReleaseRange(pVM, pRange);
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->prefix & PREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */
547
548
549/**
550 * Gets the address / opcode mask corresponding to the given CPU mode.
551 *
552 * @returns Mask.
553 * @param enmCpuMode CPU mode.
554 */
555static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
556{
557 switch (enmCpuMode)
558 {
559 case CPUMODE_16BIT: return UINT16_MAX;
560 case CPUMODE_32BIT: return UINT32_MAX;
561 case CPUMODE_64BIT: return UINT64_MAX;
562 default:
563 AssertFailedReturn(UINT32_MAX);
564 }
565}
566
567
568/**
569 * [REP] STOSB
570 * [REP] STOSW
571 * [REP] STOSD
572 *
573 * Restricted implementation.
574 *
575 *
576 * @returns VBox status code.
577 *
578 * @param pVM The virtual machine.
579 * @param pRegFrame Trap register frame.
580 * @param GCPhysFault The GC physical address corresponding to pvFault.
581 * @param pCpu Disassembler CPU state.
582 * @param pRange Pointer MMIO range.
583 */
584static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
585{
586 /*
587 * We do not support segment prefixes or REPNE..
588 */
589 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
590 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
591
592 /*
593 * Get bytes/words/dwords/qwords count to copy.
594 */
595 uint64_t const fAddrMask = iomDisModeToMask(pCpu->addrmode);
596 RTGCUINTREG cTransfers = 1;
597 if (pCpu->prefix & PREFIX_REP)
598 {
599#ifndef IN_RC
600 if ( CPUMIsGuestIn64BitCode(VMMGetCpu(pVM), pRegFrame)
601 && pRegFrame->rcx >= _4G)
602 return VINF_EM_RAW_EMULATE_INSTR;
603#endif
604
605 cTransfers = pRegFrame->rcx & fAddrMask;
606 if (!cTransfers)
607 return VINF_SUCCESS;
608 }
609
610/** @todo r=bird: bounds checks! */
611
612 /*
613 * Get data size.
614 */
615 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
616 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
617 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
618
619#ifdef VBOX_WITH_STATISTICS
620 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
621 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
622#endif
623
624
625 RTGCPHYS Phys = GCPhysFault;
626 int rc;
627 if ( pRange->CTX_SUFF(pfnFillCallback)
628 && cb <= 4 /* can only fill 32-bit values */)
629 {
630 /*
631 * Use the fill callback.
632 */
633 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
634 if (offIncrement > 0)
635 {
636 /* addr++ variant. */
637 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
638 pRegFrame->eax, cb, cTransfers);
639 if (rc == VINF_SUCCESS)
640 {
641 /* Update registers. */
642 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
643 | (pRegFrame->rdi & ~fAddrMask);
644 if (pCpu->prefix & PREFIX_REP)
645 pRegFrame->rcx &= ~fAddrMask;
646 }
647 }
648 else
649 {
650 /* addr-- variant. */
651 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
652 Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
653 pRegFrame->eax, cb, cTransfers);
654 if (rc == VINF_SUCCESS)
655 {
656 /* Update registers. */
657 pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
658 | (pRegFrame->rdi & ~fAddrMask);
659 if (pCpu->prefix & PREFIX_REP)
660 pRegFrame->rcx &= ~fAddrMask;
661 }
662 }
663 }
664 else
665 {
666 /*
667 * Use the write callback.
668 */
669 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
670 uint64_t u64Data = pRegFrame->rax;
671
672 /* fill loop. */
673 do
674 {
675 rc = iomMMIODoWrite(pVM, pRange, Phys, &u64Data, cb);
676 if (rc != VINF_SUCCESS)
677 break;
678
679 Phys += offIncrement;
680 pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
681 | (pRegFrame->rdi & ~fAddrMask);
682 cTransfers--;
683 } while (cTransfers);
684
685 /* Update rcx on exit. */
686 if (pCpu->prefix & PREFIX_REP)
687 pRegFrame->rcx = (cTransfers & fAddrMask)
688 | (pRegFrame->rcx & ~fAddrMask);
689 }
690
691 /*
692 * Work statistics and return.
693 */
694 if (rc == VINF_SUCCESS)
695 iomMMIOStatLength(pVM, cb);
696 return rc;
697}
698
699
700/**
701 * [REP] LODSB
702 * [REP] LODSW
703 * [REP] LODSD
704 *
705 * Restricted implementation.
706 *
707 *
708 * @returns VBox status code.
709 *
710 * @param pVM The virtual machine.
711 * @param pRegFrame Trap register frame.
712 * @param GCPhysFault The GC physical address corresponding to pvFault.
713 * @param pCpu Disassembler CPU state.
714 * @param pRange Pointer MMIO range.
715 */
716static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
717{
718 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
719
720 /*
721 * We do not support segment prefixes or REP*.
722 */
723 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REP | PREFIX_REPNE))
724 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
725
726 /*
727 * Get data size.
728 */
729 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
730 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
731 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
732
733 /*
734 * Perform read.
735 */
736 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
737 if (rc == VINF_SUCCESS)
738 {
739 uint64_t const fAddrMask = iomDisModeToMask(pCpu->addrmode);
740 pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
741 | (pRegFrame->rsi & ~fAddrMask);
742 }
743
744 /*
745 * Work statistics and return.
746 */
747 if (rc == VINF_SUCCESS)
748 iomMMIOStatLength(pVM, cb);
749 return rc;
750}
751
752
753/**
754 * CMP [MMIO], reg|imm
755 * CMP reg|imm, [MMIO]
756 *
757 * Restricted implementation.
758 *
759 *
760 * @returns VBox status code.
761 *
762 * @param pVM The virtual machine.
763 * @param pRegFrame Trap register frame.
764 * @param GCPhysFault The GC physical address corresponding to pvFault.
765 * @param pCpu Disassembler CPU state.
766 * @param pRange Pointer MMIO range.
767 */
768static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
769{
770 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
771
772 /*
773 * Get the operands.
774 */
775 unsigned cb = 0;
776 uint64_t uData1 = 0;
777 uint64_t uData2 = 0;
778 int rc;
779 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
780 /* cmp reg, [MMIO]. */
781 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
782 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
783 /* cmp [MMIO], reg|imm. */
784 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
785 else
786 {
787 AssertMsgFailed(("Disassember CMP problem..\n"));
788 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
789 }
790
791 if (rc == VINF_SUCCESS)
792 {
793#if HC_ARCH_BITS == 32
794 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
795 if (cb > 4)
796 return VINF_IOM_HC_MMIO_READ_WRITE;
797#endif
798 /* Emulate CMP and update guest flags. */
799 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
800 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
801 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
802 iomMMIOStatLength(pVM, cb);
803 }
804
805 return rc;
806}
807
808
809/**
810 * AND [MMIO], reg|imm
811 * AND reg, [MMIO]
812 * OR [MMIO], reg|imm
813 * OR reg, [MMIO]
814 *
815 * Restricted implementation.
816 *
817 *
818 * @returns VBox status code.
819 *
820 * @param pVM The virtual machine.
821 * @param pRegFrame Trap register frame.
822 * @param GCPhysFault The GC physical address corresponding to pvFault.
823 * @param pCpu Disassembler CPU state.
824 * @param pRange Pointer MMIO range.
825 * @param pfnEmulate Instruction emulation function.
826 */
827static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
828{
829 unsigned cb = 0;
830 uint64_t uData1 = 0;
831 uint64_t uData2 = 0;
832 bool fAndWrite;
833 int rc;
834
835#ifdef LOG_ENABLED
836 const char *pszInstr;
837
838 if (pCpu->pCurInstr->opcode == OP_XOR)
839 pszInstr = "Xor";
840 else if (pCpu->pCurInstr->opcode == OP_OR)
841 pszInstr = "Or";
842 else if (pCpu->pCurInstr->opcode == OP_AND)
843 pszInstr = "And";
844 else
845 pszInstr = "OrXorAnd??";
846#endif
847
848 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
849 {
850#if HC_ARCH_BITS == 32
851 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
852 if (cb > 4)
853 return VINF_IOM_HC_MMIO_READ_WRITE;
854#endif
855 /* and reg, [MMIO]. */
856 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
857 fAndWrite = false;
858 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
859 }
860 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
861 {
862#if HC_ARCH_BITS == 32
863 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
864 if (cb > 4)
865 return VINF_IOM_HC_MMIO_READ_WRITE;
866#endif
867 /* and [MMIO], reg|imm. */
868 fAndWrite = true;
869 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
870 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
871 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
872 else
873 rc = VINF_IOM_HC_MMIO_READ_WRITE;
874 }
875 else
876 {
877 AssertMsgFailed(("Disassember AND problem..\n"));
878 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
879 }
880
881 if (rc == VINF_SUCCESS)
882 {
883 /* Emulate AND and update guest flags. */
884 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
885
886 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
887
888 if (fAndWrite)
889 /* Store result to MMIO. */
890 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
891 else
892 {
893 /* Store result to register. */
894 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData1);
895 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
896 }
897 if (rc == VINF_SUCCESS)
898 {
899 /* Update guest's eflags and finish. */
900 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
901 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
902 iomMMIOStatLength(pVM, cb);
903 }
904 }
905
906 return rc;
907}
908
909
910/**
911 * TEST [MMIO], reg|imm
912 * TEST reg, [MMIO]
913 *
914 * Restricted implementation.
915 *
916 *
917 * @returns VBox status code.
918 *
919 * @param pVM The virtual machine.
920 * @param pRegFrame Trap register frame.
921 * @param GCPhysFault The GC physical address corresponding to pvFault.
922 * @param pCpu Disassembler CPU state.
923 * @param pRange Pointer MMIO range.
924 */
925static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
926{
927 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
928
929 unsigned cb = 0;
930 uint64_t uData1 = 0;
931 uint64_t uData2 = 0;
932 int rc;
933
934 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
935 {
936 /* and test, [MMIO]. */
937 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
938 }
939 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
940 {
941 /* test [MMIO], reg|imm. */
942 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
943 }
944 else
945 {
946 AssertMsgFailed(("Disassember TEST problem..\n"));
947 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
948 }
949
950 if (rc == VINF_SUCCESS)
951 {
952#if HC_ARCH_BITS == 32
953 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
954 if (cb > 4)
955 return VINF_IOM_HC_MMIO_READ_WRITE;
956#endif
957
958 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
959 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
960 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
961 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
962 iomMMIOStatLength(pVM, cb);
963 }
964
965 return rc;
966}
967
968
969/**
970 * BT [MMIO], reg|imm
971 *
972 * Restricted implementation.
973 *
974 *
975 * @returns VBox status code.
976 *
977 * @param pVM The virtual machine.
978 * @param pRegFrame Trap register frame.
979 * @param GCPhysFault The GC physical address corresponding to pvFault.
980 * @param pCpu Disassembler CPU state.
981 * @param pRange Pointer MMIO range.
982 */
983static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
984{
985 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
986
987 uint64_t uBit = 0;
988 uint64_t uData = 0;
989 unsigned cbIgnored;
990
991 if (!iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uBit, &cbIgnored))
992 {
993 AssertMsgFailed(("Disassember BT problem..\n"));
994 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
995 }
996 /* The size of the memory operand only matters here. */
997 unsigned cbData = DISGetParamSize(pCpu, &pCpu->param1);
998
999 /* bt [MMIO], reg|imm. */
1000 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData);
1001 if (rc == VINF_SUCCESS)
1002 {
1003 /* Find the bit inside the faulting address */
1004 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
1005 iomMMIOStatLength(pVM, cbData);
1006 }
1007
1008 return rc;
1009}
1010
1011/**
1012 * XCHG [MMIO], reg
1013 * XCHG reg, [MMIO]
1014 *
1015 * Restricted implementation.
1016 *
1017 *
1018 * @returns VBox status code.
1019 *
1020 * @param pVM The virtual machine.
1021 * @param pRegFrame Trap register frame.
1022 * @param GCPhysFault The GC physical address corresponding to pvFault.
1023 * @param pCpu Disassembler CPU state.
1024 * @param pRange Pointer MMIO range.
1025 */
1026static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1027{
1028 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
1029 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1030 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
1031 return VINF_IOM_HC_MMIO_READ_WRITE;
1032
1033 int rc;
1034 unsigned cb = 0;
1035 uint64_t uData1 = 0;
1036 uint64_t uData2 = 0;
1037 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
1038 {
1039 /* xchg reg, [MMIO]. */
1040 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1041 if (rc == VINF_SUCCESS)
1042 {
1043 /* Store result to MMIO. */
1044 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
1045
1046 if (rc == VINF_SUCCESS)
1047 {
1048 /* Store result to register. */
1049 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData2);
1050 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1051 }
1052 else
1053 Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1054 }
1055 else
1056 Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1057 }
1058 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
1059 {
1060 /* xchg [MMIO], reg. */
1061 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1062 if (rc == VINF_SUCCESS)
1063 {
1064 /* Store result to MMIO. */
1065 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
1066 if (rc == VINF_SUCCESS)
1067 {
1068 /* Store result to register. */
1069 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param2, pRegFrame, uData1);
1070 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1071 }
1072 else
1073 AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
1074 }
1075 else
1076 AssertMsg(rc == VINF_IOM_HC_MMIO_READ_WRITE || rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
1077 }
1078 else
1079 {
1080 AssertMsgFailed(("Disassember XCHG problem..\n"));
1081 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1082 }
1083 return rc;
1084}
1085
1086
1087/**
1088 * \#PF Handler callback for MMIO ranges.
1089 *
1090 * @returns VBox status code (appropriate for GC return).
1091 * @param pVM VM Handle.
1092 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1093 * any error code (the EPT misconfig hack).
1094 * @param pCtxCore Trap register frame.
1095 * @param GCPhysFault The GC physical address corresponding to pvFault.
1096 * @param pvUser Pointer to the MMIO ring-3 range entry.
1097 */
static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
{
    /* Take the IOM lock before performing any MMIO. */
    int rc = IOM_LOCK(pVM);
#ifndef IN_RING3
    /* In RC/R0 a contended lock means the access must be retried in ring-3. */
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_HC_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
         GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
    if (!pStats)
    {
# ifdef IN_RING3
        IOM_UNLOCK(pVM);
        return VERR_NO_MEMORY;
# else
        /* Can't allocate stats in RC/R0; let ring-3 restart the access. */
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        IOM_UNLOCK(pVM);
        return VINF_IOM_HC_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case, so
     * do the simple test first and then try to deal with uErrorCode being N/A.
     * (uErrorCode == UINT32_MAX is the EPT misconfig case where we don't know
     * whether it was a read or a write, so either missing R0/RC callback with a
     * ring-3 counterpart forces a ring-3 round trip.)
     */
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        IOM_UNLOCK(pVM);
        return VINF_IOM_HC_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * Retain the range and do locking.
     */
    /* The IOM lock is dropped before entering the device critsect to keep the
       lock order; the retained reference keeps pRange alive meanwhile. */
    iomMmioRetainRange(pRange);
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    IOM_UNLOCK(pVM);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_HC_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

    /*
     * Disassemble the instruction and interpret it.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
    unsigned cbOp;
    rc = EMInterpretDisasOne(pVM, pVCpu, pCtxCore, pDis, &cbOp);
    AssertRC(rc);
    if (RT_FAILURE(rc))
    {
        iomMmioReleaseRange(pVM, pRange);
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
        return rc;
    }
    switch (pDis->pCurInstr->opcode)
    {
        case OP_MOV:
        case OP_MOVZX:
        case OP_MOVSX:
        {
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
            AssertMsg(uErrorCode == UINT32_MAX || DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->param1.flags, DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags), pDis->param2.flags, DIS_IS_EFFECTIVE_ADDR(pDis->param2.flags), uErrorCode));
            /* Direction: use the page-fault R/W bit when available, otherwise
               (EPT misconfig) infer it from which operand is the memory one. */
            if (uErrorCode != UINT32_MAX    /* EPT+MMIO optimization */
                ? uErrorCode & X86_TRAP_PF_RW
                : DIS_IS_EFFECTIVE_ADDR(pDis->param1.flags))
                rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
            else
                rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
            break;
        }


#ifdef IOM_WITH_MOVS_SUPPORT
        case OP_MOVSB:
        case OP_MOVSWD:
        {
            /* MOVS needs the R/W direction; without an error code (EPT
               misconfig) we must let ring-3 handle it. */
            if (uErrorCode == UINT32_MAX)
                rc = VINF_IOM_HC_MMIO_READ_WRITE;
            else
            {
                STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
                PSTAMPROFILE pStat = NULL;
                rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
                STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
            }
            break;
        }
#endif

        case OP_STOSB:
        case OP_STOSWD:
            Assert(uErrorCode & X86_TRAP_PF_RW);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
            rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
            break;

        case OP_LODSB:
        case OP_LODSWD:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
            rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
            break;

        case OP_CMP:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
            rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
            break;

        case OP_AND:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
            break;

        case OP_OR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
            break;

        case OP_XOR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
            break;

        case OP_TEST:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
            rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
            break;

        case OP_BT:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
            rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
            break;

        case OP_XCHG:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
            rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
            break;


        /*
         * The instruction isn't supported. Hand it on to ring-3.
         */
        default:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
            rc = VINF_IOM_HC_MMIO_READ_WRITE;
            break;
    }

    /*
     * On success advance EIP.
     */
    if (rc == VINF_SUCCESS)
        pCtxCore->rip += cbOp;
    else
    {
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
        /* Count ring-3 deferrals per direction; other statuses aren't counted. */
        switch (rc)
        {
            case VINF_IOM_HC_MMIO_READ:
            case VINF_IOM_HC_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
                break;
            case VINF_IOM_HC_MMIO_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
                break;
        }
#endif
    }

    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    iomMmioReleaseRange(pVM, pRange);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    return rc;
}
1324
1325/**
1326 * \#PF Handler callback for MMIO ranges.
1327 *
1328 * @returns VBox status code (appropriate for GC return).
1329 * @param pVM VM Handle.
1330 * @param uErrorCode CPU Error code.
1331 * @param pCtxCore Trap register frame.
1332 * @param pvFault The fault address (cr2).
1333 * @param GCPhysFault The GC physical address corresponding to pvFault.
1334 * @param pvUser Pointer to the MMIO ring-3 range entry.
1335 */
1336VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1337{
1338 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1339 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1340 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1341 return VBOXSTRICTRC_VAL(rcStrict);
1342}
1343
1344/**
1345 * Physical access handler for MMIO ranges.
1346 *
1347 * @returns VBox status code (appropriate for GC return).
1348 * @param pVM VM Handle.
1349 * @param uErrorCode CPU Error code.
1350 * @param pCtxCore Trap register frame.
1351 * @param GCPhysFault The GC physical address.
1352 */
1353VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1354{
1355 int rc2 = IOM_LOCK(pVM);
1356#ifndef IN_RING3
1357 if (rc2 == VERR_SEM_BUSY)
1358 return VINF_IOM_HC_MMIO_READ_WRITE;
1359#endif
1360 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMmioGetRange(pVM, GCPhysFault));
1361 IOM_UNLOCK(pVM);
1362 return VBOXSTRICTRC_VAL(rcStrict);
1363}
1364
1365
1366#ifdef IN_RING3
1367/**
1368 * \#PF Handler callback for MMIO ranges.
1369 *
1370 * @returns VINF_SUCCESS if the handler have carried out the operation.
1371 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1372 * @param pVM VM Handle.
1373 * @param GCPhys The physical address the guest is writing to.
1374 * @param pvPhys The HC mapping of that address.
1375 * @param pvBuf What the guest is reading/writing.
1376 * @param cbBuf How much it's reading/writing.
1377 * @param enmAccessType The access type.
1378 * @param pvUser Pointer to the MMIO range entry.
1379 */
1380DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
1381 PGMACCESSTYPE enmAccessType, void *pvUser)
1382{
1383 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1384 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1385
1386 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1387 AssertPtr(pRange);
1388
1389 /*
1390 * Validate the range.
1391 */
1392 int rc = IOM_LOCK(pVM);
1393 AssertRC(rc);
1394 Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));
1395
1396 /*
1397 * Perform locking.
1398 */
1399 iomMmioRetainRange(pRange);
1400 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1401 IOM_UNLOCK(pVM);
1402 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_HC_MMIO_READ_WRITE);
1403 if (rc != VINF_SUCCESS)
1404 {
1405 iomMmioReleaseRange(pVM, pRange);
1406 return rc;
1407 }
1408
1409 /*
1410 * Perform the access.
1411 */
1412 if (enmAccessType == PGMACCESSTYPE_READ)
1413 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1414 else
1415 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1416
1417 AssertRC(rc);
1418 iomMmioReleaseRange(pVM, pRange);
1419 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1420 return rc;
1421}
1422#endif /* IN_RING3 */
1423
1424
1425/**
1426 * Reads a MMIO register.
1427 *
1428 * @returns VBox status code.
1429 *
1430 * @param pVM VM handle.
1431 * @param GCPhys The physical address to read.
1432 * @param pu32Value Where to store the value read.
 * @param   cbValue     The size of the register to read in bytes. 1, 2, 4 or 8 bytes.
1434 */
1435VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1436{
1437 /* Take the IOM lock before performing any MMIO. */
1438 int rc = IOM_LOCK(pVM);
1439#ifndef IN_RING3
1440 if (rc == VERR_SEM_BUSY)
1441 return VINF_IOM_HC_MMIO_WRITE;
1442#endif
1443 AssertRC(rc);
1444#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1445 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
1446#endif
1447
1448 /*
1449 * Lookup the current context range node and statistics.
1450 */
1451 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1452 AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1453 if (!pRange)
1454 {
1455 IOM_UNLOCK(pVM);
1456 return VERR_INTERNAL_ERROR;
1457 }
1458#ifdef VBOX_WITH_STATISTICS
1459 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1460 if (!pStats)
1461 {
1462 IOM_UNLOCK(pVM);
1463# ifdef IN_RING3
1464 return VERR_NO_MEMORY;
1465# else
1466 return VINF_IOM_HC_MMIO_READ;
1467# endif
1468 }
1469 STAM_COUNTER_INC(&pStats->Accesses);
1470#endif /* VBOX_WITH_STATISTICS */
1471
1472 if (pRange->CTX_SUFF(pfnReadCallback))
1473 {
1474 /*
1475 * Perform locking.
1476 */
1477 iomMmioRetainRange(pRange);
1478 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1479 IOM_UNLOCK(pVM);
1480 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_HC_MMIO_WRITE);
1481 if (rc != VINF_SUCCESS)
1482 {
1483 iomMmioReleaseRange(pVM, pRange);
1484 return rc;
1485 }
1486
1487 /*
1488 * Perform the read and deal with the result.
1489 */
1490 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1491 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pu32Value, (unsigned)cbValue);
1492 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1493 switch (rc)
1494 {
1495 case VINF_SUCCESS:
1496 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1497 iomMmioReleaseRange(pVM, pRange);
1498 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1499 return rc;
1500#ifndef IN_RING3
1501 case VINF_IOM_HC_MMIO_READ:
1502 case VINF_IOM_HC_MMIO_READ_WRITE:
1503 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1504#endif
1505 default:
1506 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1507 iomMmioReleaseRange(pVM, pRange);
1508 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1509 return rc;
1510
1511 case VINF_IOM_MMIO_UNUSED_00:
1512 switch (cbValue)
1513 {
1514 case 1: *(uint8_t *)pu32Value = UINT8_C(0x00); break;
1515 case 2: *(uint16_t *)pu32Value = UINT16_C(0x0000); break;
1516 case 4: *(uint32_t *)pu32Value = UINT32_C(0x00000000); break;
1517 case 8: *(uint64_t *)pu32Value = UINT64_C(0x0000000000000000); break;
1518 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1519 }
1520 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1521 iomMmioReleaseRange(pVM, pRange);
1522 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1523 return VINF_SUCCESS;
1524
1525 case VINF_IOM_MMIO_UNUSED_FF:
1526 switch (cbValue)
1527 {
1528 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1529 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1530 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1531 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1532 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1533 }
1534 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1535 iomMmioReleaseRange(pVM, pRange);
1536 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1537 return VINF_SUCCESS;
1538 }
1539 /* not reached */
1540 }
1541#ifndef IN_RING3
1542 if (pRange->pfnReadCallbackR3)
1543 {
1544 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1545 IOM_UNLOCK(pVM);
1546 return VINF_IOM_HC_MMIO_READ;
1547 }
1548#endif
1549
1550 /*
1551 * Unassigned memory - this is actually not supposed t happen...
1552 */
1553 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1554 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1555 switch (cbValue)
1556 {
1557 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1558 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1559 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1560 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1561 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1562 }
1563 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1564 IOM_UNLOCK(pVM);
1565 return VINF_SUCCESS;
1566}
1567
1568
1569/**
1570 * Writes to a MMIO register.
1571 *
1572 * @returns VBox status code.
1573 *
1574 * @param pVM VM handle.
1575 * @param GCPhys The physical address to write to.
1576 * @param u32Value The value to write.
 * @param   cbValue     The size of the register to write in bytes. 1, 2, 4 or 8 bytes.
1578 */
1579VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1580{
1581 /* Take the IOM lock before performing any MMIO. */
1582 int rc = IOM_LOCK(pVM);
1583#ifndef IN_RING3
1584 if (rc == VERR_SEM_BUSY)
1585 return VINF_IOM_HC_MMIO_WRITE;
1586#endif
1587 AssertRC(rc);
1588#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1589 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
1590#endif
1591
1592 /*
1593 * Lookup the current context range node.
1594 */
1595 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1596 AssertMsg(pRange, ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1597 if (!pRange)
1598 {
1599 IOM_UNLOCK(pVM);
1600 return VERR_INTERNAL_ERROR;
1601 }
1602#ifdef VBOX_WITH_STATISTICS
1603 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1604 if (!pStats)
1605 {
1606 IOM_UNLOCK(pVM);
1607# ifdef IN_RING3
1608 return VERR_NO_MEMORY;
1609# else
1610 return VINF_IOM_HC_MMIO_WRITE;
1611# endif
1612 }
1613 STAM_COUNTER_INC(&pStats->Accesses);
1614#endif /* VBOX_WITH_STATISTICS */
1615
1616 if (pRange->CTX_SUFF(pfnWriteCallback))
1617 {
1618 /*
1619 * Perform locking.
1620 */
1621 iomMmioRetainRange(pRange);
1622 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1623 IOM_UNLOCK(pVM);
1624 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_HC_MMIO_READ);
1625 if (rc != VINF_SUCCESS)
1626 {
1627 iomMmioReleaseRange(pVM, pRange);
1628 return rc;
1629 }
1630
1631 /*
1632 * Perform the write.
1633 */
1634 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1635 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1636 GCPhys, &u32Value, (unsigned)cbValue);
1637 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1638#ifndef IN_RING3
1639 if ( rc == VINF_IOM_HC_MMIO_WRITE
1640 || rc == VINF_IOM_HC_MMIO_READ_WRITE)
1641 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1642#endif
1643 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, rc));
1644 iomMmioReleaseRange(pVM, pRange);
1645 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1646 return rc;
1647 }
1648#ifndef IN_RING3
1649 if (pRange->pfnWriteCallbackR3)
1650 {
1651 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1652 IOM_UNLOCK(pVM);
1653 return VINF_IOM_HC_MMIO_WRITE;
1654 }
1655#endif
1656
1657 /*
1658 * No write handler, nothing to do.
1659 */
1660 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1661 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1662 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
1663 IOM_UNLOCK(pVM);
1664 return VINF_SUCCESS;
1665}
1666
1667
1668/**
1669 * [REP*] INSB/INSW/INSD
1670 * ES:EDI,DX[,ECX]
1671 *
1672 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1673 *
1674 * @returns Strict VBox status code. Informational status codes other than the one documented
1675 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1676 * @retval VINF_SUCCESS Success.
1677 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1678 * status code must be passed on to EM.
1679 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1680 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1681 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1682 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1683 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1684 *
1685 * @param pVM The virtual machine.
1686 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1687 * @param uPort IO Port
1688 * @param uPrefix IO instruction prefix
1689 * @param enmAddrMode The address mode.
1690 * @param cbTransfer Size of transfer unit
1691 */
VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
                                        DISCPUMODE enmAddrMode, uint32_t cbTransfer)
{
    STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);

    /*
     * We do not support REPNE or decrementing destination
     * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
     */
    if (   (uPrefix & PREFIX_REPNE)
        || pRegFrame->eflags.Bits.u1DF)
        return VINF_EM_RAW_EMULATE_INSTR;

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords count to transfer.
     */
    /* fAddrMask limits rcx/rdi arithmetic to the effective address width. */
    uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (uPrefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* Huge 64-bit counts are handed to the recompiler instead. */
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif
        cTransfers = pRegFrame->rcx & fAddrMask;
        if (!cTransfers)
            return VINF_SUCCESS;    /* REP with zero count is a no-op. */
    }

    /* Convert destination address es:edi. */
    RTGCPTR GCPtrDst;
    int rc2 = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
                           SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                           &GCPtrDst);
    if (RT_FAILURE(rc2))
    {
        Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
    uint32_t const cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
    rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
                          X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
    if (rc2 != VINF_SUCCESS)
    {
        Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (cTransfers > 1)
    {
        /* If the device supports string transfers, ask it to do as
         * much as it wants. The rest is done with single-word transfers. */
        const RTGCUINTREG cTransfersOrg = cTransfers;
        rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
        AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
        /* Advance rdi by what the device consumed; bits above the address
           width are preserved unchanged. */
        pRegFrame->rdi = ((pRegFrame->rdi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
                       | (pRegFrame->rdi & ~fAddrMask);
    }

#ifdef IN_RC
    MMGCRamRegisterTrapHandler(pVM);
#endif
    while (cTransfers && rcStrict == VINF_SUCCESS)
    {
        uint32_t u32Value;
        rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
        if (!IOM_SUCCESS(rcStrict))
            break;
        rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
        Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
        GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
        pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
                       | (pRegFrame->rdi & ~fAddrMask);
        cTransfers--;
    }
#ifdef IN_RC
    MMGCRamDeregisterTrapHandler(pVM);
#endif

    /* Update rcx on exit. */
    if (uPrefix & PREFIX_REP)
        pRegFrame->rcx = (cTransfers & fAddrMask)
                       | (pRegFrame->rcx & ~fAddrMask);

    AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
1786
1787
1788/**
1789 * [REP*] INSB/INSW/INSD
1790 * ES:EDI,DX[,ECX]
1791 *
1792 * @returns Strict VBox status code. Informational status codes other than the one documented
1793 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1794 * @retval VINF_SUCCESS Success.
1795 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1796 * status code must be passed on to EM.
1797 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1798 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1799 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1800 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1801 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1802 *
1803 * @param pVM The virtual machine.
1804 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1805 * @param pCpu Disassembler CPU state.
1806 */
1807VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1808{
1809 /*
1810 * Get port number directly from the register (no need to bother the
1811 * disassembler). And get the I/O register size from the opcode / prefix.
1812 */
1813 RTIOPORT Port = pRegFrame->edx & 0xffff;
1814 unsigned cb = 0;
1815 if (pCpu->pCurInstr->opcode == OP_INSB)
1816 cb = 1;
1817 else
1818 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1819
1820 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1821 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1822 {
1823 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1824 return rcStrict;
1825 }
1826
1827 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->prefix, pCpu->addrmode, cb);
1828}
1829
1830
1831/**
1832 * [REP*] OUTSB/OUTSW/OUTSD
1833 * DS:ESI,DX[,ECX]
1834 *
1835 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1836 *
1837 * @returns Strict VBox status code. Informational status codes other than the one documented
1838 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1839 * @retval VINF_SUCCESS Success.
1840 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1841 * status code must be passed on to EM.
1842 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1843 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1844 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1845 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1846 *
1847 * @param pVM The virtual machine.
1848 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1849 * @param uPort IO Port
1850 * @param uPrefix IO instruction prefix
1851 * @param enmAddrMode The address mode.
1852 * @param cbTransfer Size of transfer unit
1853 */
VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
                                         DISCPUMODE enmAddrMode, uint32_t cbTransfer)
{
    STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);

    /*
     * We do not support segment prefixes, REPNE or
     * decrementing source pointer.
     */
    if (   (uPrefix & (PREFIX_SEG | PREFIX_REPNE))
        || pRegFrame->eflags.Bits.u1DF)
        return VINF_EM_RAW_EMULATE_INSTR;

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords count to transfer.
     */
    /* fAddrMask limits rcx/rsi arithmetic to the effective address width. */
    uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (uPrefix & PREFIX_REP)
    {
#ifndef IN_RC
        /* Huge 64-bit counts are handed to the recompiler instead. */
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif
        cTransfers = pRegFrame->rcx & fAddrMask;
        if (!cTransfers)
            return VINF_SUCCESS;    /* REP with zero count is a no-op. */
    }

    /* Convert source address ds:esi. */
    RTGCPTR GCPtrSrc;
    int rc2 = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
                           SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                           &GCPtrSrc);
    if (RT_FAILURE(rc2))
    {
        Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /* Access verification first; we currently can't recover properly from traps inside this instruction */
    uint32_t const cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
    rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
                          (cpl == 3) ? X86_PTE_US : 0);
    if (rc2 != VINF_SUCCESS)
    {
        Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    if (cTransfers > 1)
    {
        /*
         * If the device supports string transfers, ask it to do as
         * much as it wants. The rest is done with single-word transfers.
         */
        const RTGCUINTREG cTransfersOrg = cTransfers;
        rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
        AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
        /* Advance rsi by what the device consumed; bits above the address
           width are preserved unchanged. */
        pRegFrame->rsi = ((pRegFrame->rsi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
                       | (pRegFrame->rsi & ~fAddrMask);
    }

#ifdef IN_RC
    MMGCRamRegisterTrapHandler(pVM);
#endif

    while (cTransfers && rcStrict == VINF_SUCCESS)
    {
        /* Read the guest memory first; the port write happens only if that succeeded. */
        uint32_t u32Value = 0;
        rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
        if (rcStrict != VINF_SUCCESS)
            break;
        rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
        if (!IOM_SUCCESS(rcStrict))
            break;
        GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
        pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
                       | (pRegFrame->rsi & ~fAddrMask);
        cTransfers--;
    }

#ifdef IN_RC
    MMGCRamDeregisterTrapHandler(pVM);
#endif

    /* Update rcx on exit. */
    if (uPrefix & PREFIX_REP)
        pRegFrame->rcx = (cTransfers & fAddrMask)
                       | (pRegFrame->rcx & ~fAddrMask);

    AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_HC_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
1953
1954
1955/**
1956 * [REP*] OUTSB/OUTSW/OUTSD
1957 * DS:ESI,DX[,ECX]
1958 *
1959 * @returns Strict VBox status code. Informational status codes other than the one documented
1960 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1961 * @retval VINF_SUCCESS Success.
1962 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1963 * status code must be passed on to EM.
1964 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1965 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
1966 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1967 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1968 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1969 *
1970 * @param pVM The virtual machine.
1971 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1972 * @param pCpu Disassembler CPU state.
1973 */
1974VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1975{
1976 /*
1977 * Get port number from the first parameter.
1978 * And get the I/O register size from the opcode / prefix.
1979 */
1980 uint64_t Port = 0;
1981 unsigned cb = 0;
1982 bool fRc = iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &Port, &cb);
1983 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
1984 if (pCpu->pCurInstr->opcode == OP_OUTSB)
1985 cb = 1;
1986 else
1987 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1988
1989 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1990 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1991 {
1992 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1993 return rcStrict;
1994 }
1995
1996 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->prefix, pCpu->addrmode, cb);
1997}
1998
1999#ifndef IN_RC
2000
/**
 * Mapping an MMIO2 page in place of an MMIO page for direct access.
 *
 * (This is a special optimization used by the VGA device.)
 *
 * @returns VBox status code.  Always VINF_SUCCESS unless parameter validation
 *          or the range lookup fails; PGM prefetch problems are only asserted.
 *
 * @param   pVM             The virtual machine.
 * @param   GCPhys          The address of the MMIO page to be changed.
 * @param   GCPhysRemapped  The address of the MMIO2 page.
 * @param   fPageFlags      Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
 *                          for the time being.
 */
VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
{
    /* Currently only called from the VGA device during MMIO. */
    Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    /* Silently ignore (not fail) unsupported configurations so callers need no mode checks. */
    if (    !HWACCMIsEnabled(pVM)       /* useless without VT-x/AMD-V */
        ||  (   CPUMIsGuestInPagedProtectedMode(pVCpu)
             && !HWACCMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;    /* ignore */

    IOM_LOCK(pVM);

    /*
     * Lookup the context range node the page belongs to.
     * Note: the lock is held only across the lookup and the alias call below.
     */
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);

    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);

    /*
     * Do the aliasing; page align the addresses since PGM is picky.
     */
    GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;

    int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);

    IOM_UNLOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Modify the shadow page table. Since it's an MMIO page it won't be present and we
     * can simply prefetch it.
     *
     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     */
#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
# ifdef VBOX_STRICT
    uint64_t fFlags;
    RTHCPHYS HCPhys;
    rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
    Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
# endif
#endif
    /* Prefetch failures are tolerated: the page will simply fault in later. */
    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    return VINF_SUCCESS;
}
2068
2069
2070/**
2071 * Mapping a HC page in place of an MMIO page for direct access.
2072 *
2073 * (This is a special optimization used by the APIC in the VT-x case.)
2074 *
2075 * @returns VBox status code.
2076 *
2077 * @param pVM The virtual machine.
2078 * @param GCPhys The address of the MMIO page to be changed.
2079 * @param HCPhys The address of the host physical page.
2080 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2081 * for the time being.
2082 */
2083VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2084{
2085 /* Currently only called from VT-x code during a page fault. */
2086 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2087
2088 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2089 Assert(HWACCMIsEnabled(pVM));
2090
2091 PVMCPU pVCpu = VMMGetCpu(pVM);
2092
2093 /*
2094 * Lookup the context range node the page belongs to.
2095 */
2096#ifdef VBOX_STRICT
2097 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2098 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2099 AssertMsgReturn(pRange,
2100 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2101 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2102 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2103#endif
2104
2105 /*
2106 * Do the aliasing; page align the addresses since PGM is picky.
2107 */
2108 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2109 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2110
2111 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2112 AssertRCReturn(rc, rc);
2113
2114 /*
2115 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2116 * can simply prefetch it.
2117 *
2118 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2119 */
2120 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2121 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2122 return VINF_SUCCESS;
2123}
2124
2125
2126/**
2127 * Reset a previously modified MMIO region; restore the access flags.
2128 *
2129 * @returns VBox status code.
2130 *
2131 * @param pVM The virtual machine.
2132 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2133 */
2134VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2135{
2136 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2137
2138 PVMCPU pVCpu = VMMGetCpu(pVM);
2139
2140 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2141 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2142 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2143 && !HWACCMIsNestedPagingActive(pVM)))
2144 return VINF_SUCCESS; /* ignore */
2145
2146 /*
2147 * Lookup the context range node the page belongs to.
2148 */
2149#ifdef VBOX_STRICT
2150 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2151 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2152 AssertMsgReturn(pRange,
2153 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2154 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2155 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2156#endif
2157
2158 /*
2159 * Call PGM to do the job work.
2160 *
2161 * After the call, all the pages should be non-present... unless there is
2162 * a page pool flush pending (unlikely).
2163 */
2164 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2165 AssertRC(rc);
2166
2167#ifdef VBOX_STRICT
2168 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2169 {
2170 uint32_t cb = pRange->cb;
2171 GCPhys = pRange->GCPhys;
2172 while (cb)
2173 {
2174 uint64_t fFlags;
2175 RTHCPHYS HCPhys;
2176 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2177 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2178 cb -= PAGE_SIZE;
2179 GCPhys += PAGE_SIZE;
2180 }
2181 }
2182#endif
2183 return rc;
2184}
2185
2186#endif /* !IN_RC */
2187
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette