VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp @ 19454

Last change on this file since 19454 was 19141, checked in by vboxsync, 15 years ago

Action flags breakup.
Fixed PGM saved state loading of 2.2.2 images.
Reduced hacks in PATM state loading (fixups).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 64.1 KB
1/* $Id: IOMAllMMIO.cpp 19141 2009-04-23 13:52:18Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_IOM
27#include <VBox/iom.h>
28#include <VBox/cpum.h>
29#include <VBox/pgm.h>
30#include <VBox/selm.h>
31#include <VBox/mm.h>
32#include <VBox/em.h>
33#include <VBox/pgm.h>
34#include <VBox/trpm.h>
35#include "IOMInternal.h"
36#include <VBox/vm.h>
37#include <VBox/vmm.h>
38#include <VBox/hwaccm.h>
39
40#include <VBox/dis.h>
41#include <VBox/disopcode.h>
42#include <VBox/param.h>
43#include <VBox/err.h>
44#include <iprt/assert.h>
45#include <VBox/log.h>
46#include <iprt/asm.h>
47#include <iprt/string.h>
48
49
50/*******************************************************************************
51* Global Variables *
52*******************************************************************************/
53
54/**
55 * Array for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
56 */
57static const unsigned g_aSize2Shift[] =
58{
59 ~0, /* 0 - invalid */
60 0, /* *1 == 2^0 */
61 1, /* *2 == 2^1 */
62 ~0, /* 3 - invalid */
63 2, /* *4 == 2^2 */
64 ~0, /* 5 - invalid */
65 ~0, /* 6 - invalid */
66 ~0, /* 7 - invalid */
67 3 /* *8 == 2^3 */
68};
69
70/**
71 * Macro for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
72 */
73#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
74
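/* Examples: SIZE_2_SHIFT(1) == 0, SIZE_2_SHIFT(2) == 1, SIZE_2_SHIFT(4) == 2 and
   SIZE_2_SHIFT(8) == 3, so a transfer count becomes a byte count via
   cTransfers << SIZE_2_SHIFT(cb); any other size indexes an invalid (~0) entry. */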
75
76/**
77 * Wrapper which does the write and updates range statistics when such are enabled.
78 * @warning RT_SUCCESS(rc=VINF_IOM_HC_MMIO_WRITE) is TRUE!
79 */
80DECLINLINE(int) iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
81{
82#ifdef VBOX_WITH_STATISTICS
83 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
84 Assert(pStats);
85#endif
86
87 int rc;
88 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
89 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhysFault, (void *)pvData, cb); /* @todo fix const!! */
90 else
91 rc = VINF_SUCCESS;
92 if (rc != VINF_IOM_HC_MMIO_WRITE)
93 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
94 return rc;
95}
96
97
98/**
99 * Wrapper which does the read and updates range statistics when such are enabled.
100 */
101DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
102{
103#ifdef VBOX_WITH_STATISTICS
104 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
105 Assert(pStats);
106#endif
107
108 int rc;
109 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
110 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
111 else
112 rc = VINF_IOM_MMIO_UNUSED_FF;
113 if (rc != VINF_SUCCESS)
114 {
115 switch (rc)
116 {
117 case VINF_IOM_MMIO_UNUSED_FF:
118 switch (cbValue)
119 {
120 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
121 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
122 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
123 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
124 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
125 }
126 rc = VINF_SUCCESS;
127 break;
128
129 case VINF_IOM_MMIO_UNUSED_00:
130 switch (cbValue)
131 {
132 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
133 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
134 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
135 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
136 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
137 }
138 rc = VINF_SUCCESS;
139 break;
140 }
141 if (rc != VINF_IOM_HC_MMIO_READ)
142 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
143 }
144 return rc;
145}
146
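/*
 * Sketch of a device read callback using the VINF_IOM_MMIO_UNUSED_* convention handled
 * above (the device name and register layout are assumptions; the signature and status
 * codes are the ones used by this file):
 *
 *     static DECLCALLBACK(int) devFooMMIORead(PPDMDEVINS pDevIns, void *pvUser,
 *                                             RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
 *     {
 *         if (GCPhysAddr & 0x3)                   // unimplemented/unaligned register:
 *             return VINF_IOM_MMIO_UNUSED_FF;     // iomMMIODoRead fills in all-ones.
 *         *(uint32_t *)pv = 0;                    // implemented registers set *pv.
 *         return VINF_SUCCESS;
 *     }
 */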
147
148/**
149 * Internal - statistics only.
150 */
151DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
152{
153#ifdef VBOX_WITH_STATISTICS
154 switch (cb)
155 {
156 case 1:
157 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
158 break;
159 case 2:
160 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
161 break;
162 case 4:
163 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
164 break;
165 case 8:
166 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
167 break;
168 default:
169 /* No way. */
170 AssertMsgFailed(("Invalid data length %d\n", cb));
171 break;
172 }
173#else
174 NOREF(pVM); NOREF(cb);
175#endif
176}
177
178
179/**
180 * MOV reg, mem (read)
181 * MOVZX reg, mem (read)
182 * MOVSX reg, mem (read)
183 *
184 * @returns VBox status code.
185 *
186 * @param pVM The virtual machine.
187 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
188 * @param pCpu Disassembler CPU state.
189 * @param pRange Pointer to the MMIO range.
190 * @param GCPhysFault The GC physical address corresponding to pvFault.
191 */
192static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
193{
194 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
195
196 /*
197 * Get the data size from parameter 2,
198 * and call the handler function to get the data.
199 */
200 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
201 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
202
203 uint64_t u64Data = 0;
204 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
205 if (rc == VINF_SUCCESS)
206 {
207 /*
208 * Do sign extension for MOVSX.
209 */
210 /** @todo checkup MOVSX implementation! */
211 if (pCpu->pCurInstr->opcode == OP_MOVSX)
212 {
213 if (cb == 1)
214 {
215 /* DWORD <- BYTE */
216 int64_t iData = (int8_t)u64Data;
217 u64Data = (uint64_t)iData;
218 }
219 else
220 {
221 /* DWORD <- WORD */
222 int64_t iData = (int16_t)u64Data;
223 u64Data = (uint64_t)iData;
224 }
225 }
226
227 /*
228 * Store the result to register (parameter 1).
229 */
230 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, u64Data);
231 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
232 }
233
234 if (rc == VINF_SUCCESS)
235 iomMMIOStatLength(pVM, cb);
236 return rc;
237}
238
239
240/**
241 * MOV mem, reg|imm (write)
242 *
243 * @returns VBox status code.
244 *
245 * @param pVM The virtual machine.
246 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
247 * @param pCpu Disassembler CPU state.
248 * @param pRange Pointer to the MMIO range.
249 * @param GCPhysFault The GC physical address corresponding to pvFault.
250 */
251static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
252{
253 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
254
255 /*
256 * Get data to write from second parameter,
257 * and call the callback to write it.
258 */
259 unsigned cb = 0;
260 uint64_t u64Data = 0;
261 bool fRc = iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &u64Data, &cb);
262 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
263
264 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
265 if (rc == VINF_SUCCESS)
266 iomMMIOStatLength(pVM, cb);
267 return rc;
268}
269
270
271/** Wrapper for reading virtual memory. */
272DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
273{
274 /* Note: This will fail in R0 or RC if it hits an access handler. That
275 isn't a problem though since the operation can be restarted in REM. */
276#ifdef IN_RC
277 return MMGCRamReadNoTrapHandler(pDest, (void *)GCSrc, cb);
278#else
279 return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
280#endif
281}
282
283
284/** Wrapper for writing virtual memory. */
285DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
286{
287 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
288 * raw mode code. Some thought needs to be spent on theoretical concurrency issues
289 * as well since we're not behind the pgm lock and handler may change between calls.
290 * MMGCRamWriteNoTrapHandler may also trap if the page isn't shadowed, or was kicked
291 * out from both the shadow pt (SMP or our changes) and TLB.
292 *
293 * Currently MMGCRamWriteNoTrapHandler may also fail when it hits a write access handler.
294 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr OTOH may mess up the state
295 * of some shadowed structure in R0. */
296#ifdef IN_RC
297 NOREF(pCtxCore);
298 return MMGCRamWriteNoTrapHandler((void *)GCPtrDst, pvSrc, cb);
299#elif IN_RING0
300 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
301#else
302 NOREF(pCtxCore);
303 return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
304#endif
305}
306
307
308#ifdef IOM_WITH_MOVS_SUPPORT
309/**
310 * [REP] MOVSB
311 * [REP] MOVSW
312 * [REP] MOVSD
313 *
314 * Restricted implementation.
315 *
316 *
317 * @returns VBox status code.
318 *
319 * @param pVM The virtual machine.
320 * @param uErrorCode CPU Error code.
321 * @param pRegFrame Trap register frame.
322 * @param GCPhysFault The GC physical address corresponding to pvFault.
323 * @param pCpu Disassembler CPU state.
324 * @param pRange Pointer to the MMIO range.
325 * @param ppStat Which sub-sample to attribute this call to.
326 */
327static int iomInterpretMOVS(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PSTAMPROFILE *ppStat)
328{
329 /*
330 * We do not support segment prefixes or REPNE.
331 */
332 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
333 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
334
335 PVMCPU pVCpu = VMMGetCpu(pVM);
336
337 /*
338 * Get bytes/words/dwords/qword count to copy.
339 */
340 uint32_t cTransfers = 1;
341 if (pCpu->prefix & PREFIX_REP)
342 {
343#ifndef IN_RC
344 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
345 && pRegFrame->rcx >= _4G)
346 return VINF_EM_RAW_EMULATE_INSTR;
347#endif
348
349 cTransfers = pRegFrame->ecx;
350 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
351 cTransfers &= 0xffff;
352
353 if (!cTransfers)
354 return VINF_SUCCESS;
355 }
356
357 /* Get the current privilege level. */
358 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
359
360 /*
361 * Get data size.
362 */
363 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
364 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
365 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
366
367#ifdef VBOX_WITH_STATISTICS
368 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
369 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
370#endif
371
372/** @todo re-evaluate on page boundaries. */
373
374 RTGCPHYS Phys = GCPhysFault;
375 int rc;
376 if (uErrorCode & X86_TRAP_PF_RW)
377 {
378 /*
379 * Write operation: [Mem] -> [MMIO]
380 * ds:esi (Virt Src) -> es:edi (Phys Dst)
381 */
382 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
383
384 /* Check callback. */
385 if (!pRange->CTX_SUFF(pfnWriteCallback))
386 return VINF_IOM_HC_MMIO_WRITE;
387
388 /* Convert source address ds:esi. */
389 RTGCUINTPTR pu8Virt;
390 rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
391 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
392 (PRTGCPTR)&pu8Virt);
393 if (RT_SUCCESS(rc))
394 {
395
396 /* Access verification first; we currently can't recover properly from traps inside this instruction */
397 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
398 if (rc != VINF_SUCCESS)
399 {
400 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
401 return VINF_EM_RAW_EMULATE_INSTR;
402 }
403
404#ifdef IN_RC
405 MMGCRamRegisterTrapHandler(pVM);
406#endif
407
408 /* copy loop. */
409 while (cTransfers)
410 {
411 uint32_t u32Data = 0;
412 rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
413 if (rc != VINF_SUCCESS)
414 break;
415 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
416 if (rc != VINF_SUCCESS)
417 break;
418
419 pu8Virt += offIncrement;
420 Phys += offIncrement;
421 pRegFrame->rsi += offIncrement;
422 pRegFrame->rdi += offIncrement;
423 cTransfers--;
424 }
425#ifdef IN_RC
426 MMGCRamDeregisterTrapHandler(pVM);
427#endif
428 /* Update ecx. */
429 if (pCpu->prefix & PREFIX_REP)
430 pRegFrame->ecx = cTransfers;
431 }
432 else
433 rc = VINF_IOM_HC_MMIO_READ_WRITE;
434 }
435 else
436 {
437 /*
438 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
439 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
440 */
441 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
442
443 /* Check callback. */
444 if (!pRange->CTX_SUFF(pfnReadCallback))
445 return VINF_IOM_HC_MMIO_READ;
446
447 /* Convert destination address. */
448 RTGCUINTPTR pu8Virt;
449 rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
450 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
451 (RTGCPTR *)&pu8Virt);
452 if (RT_FAILURE(rc))
453 return VINF_IOM_HC_MMIO_READ;
454
455 /* Check if destination address is MMIO. */
456 PIOMMMIORANGE pMMIODst;
457 RTGCPHYS PhysDst;
458 rc = PGMGstGetPage((RTGCPTR)pu8Virt, NULL, &PhysDst);
459 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
460 if ( RT_SUCCESS(rc)
461 && (pMMIODst = iomMMIOGetRange(&pVM->iom.s, PhysDst)))
462 {
463 /*
464 * Extra: [MMIO] -> [MMIO]
465 */
466 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
467 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
468 return VINF_IOM_HC_MMIO_READ_WRITE;
469
470 /* copy loop. */
471 while (cTransfers)
472 {
473 uint32_t u32Data;
474 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
475 if (rc != VINF_SUCCESS)
476 break;
477 rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
478 if (rc != VINF_SUCCESS)
479 break;
480
481 Phys += offIncrement;
482 PhysDst += offIncrement;
483 pRegFrame->rsi += offIncrement;
484 pRegFrame->rdi += offIncrement;
485 cTransfers--;
486 }
487 }
488 else
489 {
490 /*
491 * Normal: [MMIO] -> [Mem]
492 */
493 /* Access verification first; we currently can't recover properly from traps inside this instruction */
494 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
495 if (rc != VINF_SUCCESS)
496 {
497 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
498 return VINF_EM_RAW_EMULATE_INSTR;
499 }
500
501 /* copy loop. */
502#ifdef IN_RC
503 MMGCRamRegisterTrapHandler(pVM);
504#endif
505 while (cTransfers)
506 {
507 uint32_t u32Data;
508 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
509 if (rc != VINF_SUCCESS)
510 break;
511 rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
512 if (rc != VINF_SUCCESS)
513 {
514 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
515 break;
516 }
517
518 pu8Virt += offIncrement;
519 Phys += offIncrement;
520 pRegFrame->rsi += offIncrement;
521 pRegFrame->rdi += offIncrement;
522 cTransfers--;
523 }
524#ifdef IN_RC
525 MMGCRamDeregisterTrapHandler(pVM);
526#endif
527 }
528
529 /* Update ecx on exit. */
530 if (pCpu->prefix & PREFIX_REP)
531 pRegFrame->ecx = cTransfers;
532 }
533
534 /* work statistics. */
535 if (rc == VINF_SUCCESS)
536 iomMMIOStatLength(pVM, cb);
537 NOREF(ppStat);
538 return rc;
539}
540#endif /* IOM_WITH_MOVS_SUPPORT */
541
542
543/**
544 * [REP] STOSB
545 * [REP] STOSW
546 * [REP] STOSD
547 *
548 * Restricted implementation.
549 *
550 *
551 * @returns VBox status code.
552 *
553 * @param pVM The virtual machine.
554 * @param pRegFrame Trap register frame.
555 * @param GCPhysFault The GC physical address corresponding to pvFault.
556 * @param pCpu Disassembler CPU state.
557 * @param pRange Pointer to the MMIO range.
558 */
559static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
560{
561 /*
562 * We do not support segment prefixes or REPNE.
563 */
564 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REPNE))
565 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
566
567 /*
568 * Get bytes/words/dwords count to copy.
569 */
570 uint32_t cTransfers = 1;
571 if (pCpu->prefix & PREFIX_REP)
572 {
573#ifndef IN_RC
574 if ( CPUMIsGuestIn64BitCode(VMMGetCpu(pVM), pRegFrame)
575 && pRegFrame->rcx >= _4G)
576 return VINF_EM_RAW_EMULATE_INSTR;
577#endif
578
579 cTransfers = pRegFrame->ecx;
580 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
581 cTransfers &= 0xffff;
582
583 if (!cTransfers)
584 return VINF_SUCCESS;
585 }
586
587/** @todo r=bird: bounds checks! */
588
589 /*
590 * Get data size.
591 */
592 unsigned cb = DISGetParamSize(pCpu, &pCpu->param1);
593 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
594 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
595
596#ifdef VBOX_WITH_STATISTICS
597 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
598 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
599#endif
600
601
602 RTGCPHYS Phys = GCPhysFault;
603 uint32_t u32Data = pRegFrame->eax;
604 int rc;
605 if (pRange->CTX_SUFF(pfnFillCallback))
606 {
607 /*
608 * Use the fill callback.
609 */
610 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
611 if (offIncrement > 0)
612 {
613 /* addr++ variant. */
614 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys, u32Data, cb, cTransfers);
615 if (rc == VINF_SUCCESS)
616 {
617 /* Update registers. */
618 pRegFrame->rdi += cTransfers << SIZE_2_SHIFT(cb);
619 if (pCpu->prefix & PREFIX_REP)
620 pRegFrame->ecx = 0;
621 }
622 }
623 else
624 {
625 /* addr-- variant. */
626 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)), u32Data, cb, cTransfers);
627 if (rc == VINF_SUCCESS)
628 {
629 /* Update registers. */
630 pRegFrame->rdi -= cTransfers << SIZE_2_SHIFT(cb);
631 if (pCpu->prefix & PREFIX_REP)
632 pRegFrame->ecx = 0;
633 }
634 }
635 }
636 else
637 {
638 /*
639 * Use the write callback.
640 */
641 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
642
643 /* fill loop. */
644 do
645 {
646 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
647 if (rc != VINF_SUCCESS)
648 break;
649
650 Phys += offIncrement;
651 pRegFrame->rdi += offIncrement;
652 cTransfers--;
653 } while (cTransfers);
654
655 /* Update ecx on exit. */
656 if (pCpu->prefix & PREFIX_REP)
657 pRegFrame->ecx = cTransfers;
658 }
659
660 /*
661 * Work statistics and return.
662 */
663 if (rc == VINF_SUCCESS)
664 iomMMIOStatLength(pVM, cb);
665 return rc;
666}
667
668
669/**
670 * [REP] LODSB
671 * [REP] LODSW
672 * [REP] LODSD
673 *
674 * Restricted implementation.
675 *
676 *
677 * @returns VBox status code.
678 *
679 * @param pVM The virtual machine.
680 * @param pRegFrame Trap register frame.
681 * @param GCPhysFault The GC physical address corresponding to pvFault.
682 * @param pCpu Disassembler CPU state.
683 * @param pRange Pointer to the MMIO range.
684 */
685static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
686{
687 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
688
689 /*
690 * We do not support segment prefixes or REP*.
691 */
692 if (pCpu->prefix & (PREFIX_SEG | PREFIX_REP | PREFIX_REPNE))
693 return VINF_IOM_HC_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
694
695 /*
696 * Get data size.
697 */
698 unsigned cb = DISGetParamSize(pCpu, &pCpu->param2);
699 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
700 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
701
702 /*
703 * Perform read.
704 */
705 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
706 if (rc == VINF_SUCCESS)
707 pRegFrame->rsi += offIncrement;
708
709 /*
710 * Work statistics and return.
711 */
712 if (rc == VINF_SUCCESS)
713 iomMMIOStatLength(pVM, cb);
714 return rc;
715}
716
717
718/**
719 * CMP [MMIO], reg|imm
720 * CMP reg|imm, [MMIO]
721 *
722 * Restricted implementation.
723 *
724 *
725 * @returns VBox status code.
726 *
727 * @param pVM The virtual machine.
728 * @param pRegFrame Trap register frame.
729 * @param GCPhysFault The GC physical address corresponding to pvFault.
730 * @param pCpu Disassembler CPU state.
731 * @param pRange Pointer to the MMIO range.
732 */
733static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
734{
735 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
736
737 /*
738 * Get the operands.
739 */
740 unsigned cb = 0;
741 uint64_t uData1 = 0;
742 uint64_t uData2 = 0;
743 int rc;
744 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
745 /* cmp reg, [MMIO]. */
746 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
747 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
748 /* cmp [MMIO], reg|imm. */
749 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
750 else
751 {
752 AssertMsgFailed(("Disassember CMP problem..\n"));
753 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
754 }
755
756 if (rc == VINF_SUCCESS)
757 {
758 /* Emulate CMP and update guest flags. */
759 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
760 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
761 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
762 iomMMIOStatLength(pVM, cb);
763 }
764
765 return rc;
766}
767
768
769/**
770 * AND [MMIO], reg|imm
771 * AND reg, [MMIO]
772 * OR [MMIO], reg|imm
773 * OR reg, [MMIO]
774 *
775 * Restricted implementation.
776 *
777 *
778 * @returns VBox status code.
779 *
780 * @param pVM The virtual machine.
781 * @param pRegFrame Trap register frame.
782 * @param GCPhysFault The GC physical address corresponding to pvFault.
783 * @param pCpu Disassembler CPU state.
784 * @param pRange Pointer to the MMIO range.
785 * @param pfnEmulate Instruction emulation function.
786 */
787static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
788{
789 unsigned cb = 0;
790 uint64_t uData1 = 0;
791 uint64_t uData2 = 0;
792 bool fAndWrite;
793 int rc;
794
795#ifdef LOG_ENABLED
796 const char *pszInstr;
797
798 if (pCpu->pCurInstr->opcode == OP_XOR)
799 pszInstr = "Xor";
800 else if (pCpu->pCurInstr->opcode == OP_OR)
801 pszInstr = "Or";
802 else if (pCpu->pCurInstr->opcode == OP_AND)
803 pszInstr = "And";
804 else
805 pszInstr = "OrXorAnd??";
806#endif
807
808 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
809 {
810 /* and reg, [MMIO]. */
811 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
812 fAndWrite = false;
813 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
814 }
815 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
816 {
817 /* and [MMIO], reg|imm. */
818 fAndWrite = true;
819 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
820 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
821 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
822 else
823 rc = VINF_IOM_HC_MMIO_READ_WRITE;
824 }
825 else
826 {
827 AssertMsgFailed(("Disassember AND problem..\n"));
828 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
829 }
830
831 if (rc == VINF_SUCCESS)
832 {
833 /* Emulate AND and update guest flags. */
834 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
835
836 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
837
838 if (fAndWrite)
839 /* Store result to MMIO. */
840 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
841 else
842 {
843 /* Store result to register. */
844 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData1);
845 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
846 }
847 if (rc == VINF_SUCCESS)
848 {
849 /* Update guest's eflags and finish. */
850 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
851 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
852 iomMMIOStatLength(pVM, cb);
853 }
854 }
855
856 return rc;
857}
858
859
860/**
861 * TEST [MMIO], reg|imm
862 * TEST reg, [MMIO]
863 *
864 * Restricted implementation.
865 *
866 *
867 * @returns VBox status code.
868 *
869 * @param pVM The virtual machine.
870 * @param pRegFrame Trap register frame.
871 * @param GCPhysFault The GC physical address corresponding to pvFault.
872 * @param pCpu Disassembler CPU state.
873 * @param pRange Pointer to the MMIO range.
874 */
875static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
876{
877 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
878
879 unsigned cb = 0;
880 uint64_t uData1 = 0;
881 uint64_t uData2 = 0;
882 int rc;
883
884 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
885 {
886 /* and test, [MMIO]. */
887 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
888 }
889 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
890 {
891 /* test [MMIO], reg|imm. */
892 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
893 }
894 else
895 {
896 AssertMsgFailed(("Disassember TEST problem..\n"));
897 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
898 }
899
900 if (rc == VINF_SUCCESS)
901 {
902 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
903 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
904 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
905 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
906 iomMMIOStatLength(pVM, cb);
907 }
908
909 return rc;
910}
911
912
913/**
914 * BT [MMIO], reg|imm
915 *
916 * Restricted implementation.
917 *
918 *
919 * @returns VBox status code.
920 *
921 * @param pVM The virtual machine.
922 * @param pRegFrame Trap register frame.
923 * @param GCPhysFault The GC physical address corresponding to pvFault.
924 * @param pCpu Disassembler CPU state.
925 * @param pRange Pointer to the MMIO range.
926 */
927static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
928{
929 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
930
931 uint64_t uBit = 0;
932 uint64_t uData1 = 0;
933 unsigned cb = 0;
934 int rc;
935
936 if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uBit, &cb))
937 {
938 /* bt [MMIO], reg|imm. */
939 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
940 }
941 else
942 {
943 AssertMsgFailed(("Disassember BT problem..\n"));
944 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
945 }
946
947 if (rc == VINF_SUCCESS)
948 {
949 /* The size of the memory operand only matters here. */
950 cb = DISGetParamSize(pCpu, &pCpu->param1);
951
952 /* Find the bit inside the faulting address */
953 uBit &= (cb*8 - 1);
954
955 pRegFrame->eflags.Bits.u1CF = (uData1 >> uBit);
956 iomMMIOStatLength(pVM, cb);
957 }
958
959 return rc;
960}
961
962/**
963 * XCHG [MMIO], reg
964 * XCHG reg, [MMIO]
965 *
966 * Restricted implementation.
967 *
968 *
969 * @returns VBox status code.
970 *
971 * @param pVM The virtual machine.
972 * @param pRegFrame Trap register frame.
973 * @param GCPhysFault The GC physical address corresponding to pvFault.
974 * @param pCpu Disassembler CPU state.
975 * @param pRange Pointer to the MMIO range.
976 */
977static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
978{
979 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
980 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
981 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
982 return VINF_IOM_HC_MMIO_READ_WRITE;
983
984 int rc;
985 unsigned cb = 0;
986 uint64_t uData1 = 0;
987 uint64_t uData2 = 0;
988 if (iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &uData1, &cb))
989 {
990 /* xchg reg, [MMIO]. */
991 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
992 if (rc == VINF_SUCCESS)
993 {
994 /* Store result to MMIO. */
995 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
996
997 if (rc == VINF_SUCCESS)
998 {
999 /* Store result to register. */
1000 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param1, pRegFrame, uData2);
1001 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1002 }
1003 else
1004 Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1005 }
1006 else
1007 Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1008 }
1009 else if (iomGetRegImmData(pCpu, &pCpu->param2, pRegFrame, &uData2, &cb))
1010 {
1011 /* xchg [MMIO], reg. */
1012 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1013 if (rc == VINF_SUCCESS)
1014 {
1015 /* Store result to MMIO. */
1016 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
1017 if (rc == VINF_SUCCESS)
1018 {
1019 /* Store result to register. */
1020 bool fRc = iomSaveDataToReg(pCpu, &pCpu->param2, pRegFrame, uData1);
1021 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1022 }
1023 else
1024 Assert(rc == VINF_IOM_HC_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1025 }
1026 else
1027 Assert(rc == VINF_IOM_HC_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1028 }
1029 else
1030 {
1031 AssertMsgFailed(("Disassember XCHG problem..\n"));
1032 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1033 }
1034 return rc;
1035}
1036
1037
1038/**
1039 * \#PF Handler callback for MMIO ranges.
1040 *
1041 * @returns VBox status code (appropriate for GC return).
1042 * @param pVM VM Handle.
1043 * @param uErrorCode CPU Error code.
1044 * @param pCtxCore Trap register frame.
1045 * @param pvFault The fault address (cr2).
1046 * @param GCPhysFault The GC physical address corresponding to pvFault.
1047 * @param pvUser Pointer to the MMIO ring-3 range entry.
1048 */
1049VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1050{
1051 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1052 Log(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1053 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1054
1055 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1056 Assert(pRange);
1057 Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
1058
1059#ifdef VBOX_WITH_STATISTICS
1060 /*
1061 * Locate the statistics, if > PAGE_SIZE we'll use the first byte for everything.
1062 */
1063 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhysFault, pRange);
1064 if (!pStats)
1065 {
1066# ifdef IN_RING3
1067 return VERR_NO_MEMORY;
1068# else
1069 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1070 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1071 return uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1072# endif
1073 }
1074#endif
1075
1076#ifndef IN_RING3
1077 /*
1078 * Should we defer the request right away?
1079 */
1080 if (uErrorCode & X86_TRAP_PF_RW
1081 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1082 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1083 {
1084# ifdef VBOX_WITH_STATISTICS
1085 if (uErrorCode & X86_TRAP_PF_RW)
1086 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1087 else
1088 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1089# endif
1090
1091 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1092 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1093 return uErrorCode & X86_TRAP_PF_RW ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1094 }
1095#endif /* !IN_RING3 */
1096
1097 /*
1098 * Disassemble the instruction and interpret it.
1099 */
1100 DISCPUSTATE Cpu;
1101 unsigned cbOp;
1102 int rc = EMInterpretDisasOne(pVM, VMMGetCpu(pVM), pCtxCore, &Cpu, &cbOp);
1103 AssertRCReturn(rc, rc);
1104 switch (Cpu.pCurInstr->opcode)
1105 {
1106 case OP_MOV:
1107 case OP_MOVZX:
1108 case OP_MOVSX:
1109 {
1110 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1111 if (uErrorCode & X86_TRAP_PF_RW)
1112 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, &Cpu, pRange, GCPhysFault);
1113 else
1114 rc = iomInterpretMOVxXRead(pVM, pCtxCore, &Cpu, pRange, GCPhysFault);
1115 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1116 break;
1117 }
1118
1119
1120#ifdef IOM_WITH_MOVS_SUPPORT
1121 case OP_MOVSB:
1122 case OP_MOVSWD:
1123 {
1124 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1125 PSTAMPROFILE pStat = NULL;
1126 rc = iomInterpretMOVS(pVM, uErrorCode, pCtxCore, GCPhysFault, &Cpu, pRange, &pStat);
1127 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1128 break;
1129 }
1130#endif
1131
1132 case OP_STOSB:
1133 case OP_STOSWD:
1134 Assert(uErrorCode & X86_TRAP_PF_RW);
1135 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1136 rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1137 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1138 break;
1139
1140 case OP_LODSB:
1141 case OP_LODSWD:
1142 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1143 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1144 rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1145 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1146 break;
1147
1148 case OP_CMP:
1149 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1150 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1151 rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1152 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1153 break;
1154
1155 case OP_AND:
1156 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1157 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateAnd);
1158 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1159 break;
1160
1161 case OP_OR:
1162 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1163 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateOr);
1164 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1165 break;
1166
1167 case OP_XOR:
1168 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1169 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, &Cpu, pRange, EMEmulateXor);
1170 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1171 break;
1172
1173 case OP_TEST:
1174 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1175 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1176 rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1177 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1178 break;
1179
1180 case OP_BT:
1181 Assert(!(uErrorCode & X86_TRAP_PF_RW));
1182 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1183 rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1184 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1185 break;
1186
1187 case OP_XCHG:
1188 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1189 rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, &Cpu, pRange);
1190 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1191 break;
1192
1193
1194 /*
1195 * The instruction isn't supported. Hand it on to ring-3.
1196 */
1197 default:
1198 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1199 rc = (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
1200 break;
1201 }
1202
1203 /*
1204 * On success advance EIP.
1205 */
1206 if (rc == VINF_SUCCESS)
1207 pCtxCore->rip += cbOp;
1208 else
1209 {
1210 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1211#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1212 switch (rc)
1213 {
1214 case VINF_IOM_HC_MMIO_READ:
1215 case VINF_IOM_HC_MMIO_READ_WRITE:
1216 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1217 break;
1218 case VINF_IOM_HC_MMIO_WRITE:
1219 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1220 break;
1221 }
1222#endif
1223 }
1224
1225 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1226 return rc;
1227}
1228
1229
1230#ifdef IN_RING3
1231/**
1232 * Physical access handler callback for MMIO ranges (ring-3).
1233 *
1234 * @returns VINF_SUCCESS if the handler has carried out the operation.
1235 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1236 * @param pVM VM Handle.
1237 * @param GCPhys The physical address the guest is writing to.
1238 * @param pvPhys The HC mapping of that address.
1239 * @param pvBuf What the guest is reading/writing.
1240 * @param cbBuf How much it's reading/writing.
1241 * @param enmAccessType The access type.
1242 * @param pvUser Pointer to the MMIO range entry.
1243 */
1244DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
1245{
1246 int rc;
1247 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1248 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1249
1250 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1251
1252 Assert(pRange);
1253 Assert(pRange == iomMMIOGetRange(&pVM->iom.s, GCPhysFault));
1254
1255 if (enmAccessType == PGMACCESSTYPE_READ)
1256 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1257 else
1258 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1259
1260 AssertRC(rc);
1261 return rc;
1262}
1263#endif /* IN_RING3 */
1264
1265
1266/**
1267 * Reads a MMIO register.
1268 *
1269 * @returns VBox status code.
1270 *
1271 * @param pVM VM handle.
1272 * @param GCPhys The physical address to read.
1273 * @param pu32Value Where to store the value read.
1274 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1275 */
1276VMMDECL(int) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1277{
1278 /*
1279 * Lookup the current context range node and statistics.
1280 */
1281 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1282 AssertMsgReturn(pRange,
1283 ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue),
1284 VERR_INTERNAL_ERROR);
1285#ifdef VBOX_WITH_STATISTICS
1286 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1287 if (!pStats)
1288# ifdef IN_RING3
1289 return VERR_NO_MEMORY;
1290# else
1291 return VINF_IOM_HC_MMIO_READ;
1292# endif
1293#endif /* VBOX_WITH_STATISTICS */
1294 if (pRange->CTX_SUFF(pfnReadCallback))
1295 {
1296 /*
1297 * Perform the read and deal with the result.
1298 */
1299#ifdef VBOX_WITH_STATISTICS
1300 STAM_PROFILE_ADV_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1301#endif
1302 int rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pu32Value, (unsigned)cbValue);
1303#ifdef VBOX_WITH_STATISTICS
1304 STAM_PROFILE_ADV_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1305 if (rc != VINF_IOM_HC_MMIO_READ)
1306 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
1307#endif
1308 switch (rc)
1309 {
1310 case VINF_SUCCESS:
1311 default:
1312 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1313 return rc;
1314
1315 case VINF_IOM_MMIO_UNUSED_00:
1316 switch (cbValue)
1317 {
1318 case 1: *(uint8_t *)pu32Value = UINT8_C(0x00); break;
1319 case 2: *(uint16_t *)pu32Value = UINT16_C(0x0000); break;
1320 case 4: *(uint32_t *)pu32Value = UINT32_C(0x00000000); break;
1321 case 8: *(uint64_t *)pu32Value = UINT64_C(0x0000000000000000); break;
1322 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1323 }
1324 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1325 return VINF_SUCCESS;
1326
1327 case VINF_IOM_MMIO_UNUSED_FF:
1328 switch (cbValue)
1329 {
1330 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1331 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1332 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1333 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1334 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1335 }
1336 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, rc));
1337 return VINF_SUCCESS;
1338 }
1339 }
1340#ifndef IN_RING3
1341 if (pRange->pfnReadCallbackR3)
1342 {
1343 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1344 return VINF_IOM_HC_MMIO_READ;
1345 }
1346#endif
1347
1348 /*
1349 * Lookup the ring-3 range.
1350 */
1351#ifdef VBOX_WITH_STATISTICS
1352 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Read));
1353#endif
1354 /* Unassigned memory; this is actually not supposed to happen. */
1355 switch (cbValue)
1356 {
1357 case 1: *(uint8_t *)pu32Value = UINT8_C(0xff); break;
1358 case 2: *(uint16_t *)pu32Value = UINT16_C(0xffff); break;
1359 case 4: *(uint32_t *)pu32Value = UINT32_C(0xffffffff); break;
1360 case 8: *(uint64_t *)pu32Value = UINT64_C(0xffffffffffffffff); break;
1361 default: AssertReleaseMsgFailed(("cbValue=%d GCPhys=%RGp\n", cbValue, GCPhys)); break;
1362 }
1363 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1364 return VINF_SUCCESS;
1365}
1366
1367
1368/**
1369 * Writes to a MMIO register.
1370 *
1371 * @returns VBox status code.
1372 *
1373 * @param pVM VM handle.
1374 * @param GCPhys The physical address to write to.
1375 * @param u32Value The value to write.
1376 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
1377 */
1378VMMDECL(int) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1379{
1380 /*
1381 * Lookup the current context range node.
1382 */
1383 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1384 AssertMsgReturn(pRange,
1385 ("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue),
1386 VERR_INTERNAL_ERROR);
1387#ifdef VBOX_WITH_STATISTICS
1388 PIOMMMIOSTATS pStats = iomMMIOGetStats(&pVM->iom.s, GCPhys, pRange);
1389 if (!pStats)
1390# ifdef IN_RING3
1391 return VERR_NO_MEMORY;
1392# else
1393 return VINF_IOM_HC_MMIO_WRITE;
1394# endif
1395#endif /* VBOX_WITH_STATISTICS */
1396
1397 /*
1398 * Perform the write if there's a write handler. R0/GC may have
1399 * to defer it to ring-3.
1400 */
1401 if (pRange->CTX_SUFF(pfnWriteCallback))
1402 {
1403#ifdef VBOX_WITH_STATISTICS
1404 STAM_PROFILE_ADV_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1405#endif
1406 int rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, &u32Value, (unsigned)cbValue);
1407#ifdef VBOX_WITH_STATISTICS
1408 STAM_PROFILE_ADV_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1409 if (rc != VINF_IOM_HC_MMIO_WRITE)
1410 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
1411#endif
1412 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, rc));
1413 return rc;
1414 }
1415#ifndef IN_RING3
1416 if (pRange->pfnWriteCallbackR3)
1417 {
1418 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1419 return VINF_IOM_HC_MMIO_WRITE;
1420 }
1421#endif
1422
1423 /*
1424 * No write handler, nothing to do.
1425 */
1426#ifdef VBOX_WITH_STATISTICS
1427 STAM_COUNTER_INC(&pStats->CTX_SUFF_Z(Write));
1428#endif
1429 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
1430 return VINF_SUCCESS;
1431}
1432
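/*
 * Minimal usage sketch for the two accessors above (the register address and the
 * read-modify-write sequence are made-up examples):
 *
 *     uint32_t u32;
 *     int rc = IOMMMIORead(pVM, GCPhysRegister, &u32, sizeof(uint32_t));
 *     if (rc == VINF_SUCCESS)
 *         rc = IOMMMIOWrite(pVM, GCPhysRegister, u32 | 1, sizeof(uint32_t));
 *
 * In R0/RC either call may return VINF_IOM_HC_MMIO_READ / VINF_IOM_HC_MMIO_WRITE when the
 * range only has ring-3 callbacks, in which case the access must be retried in ring-3.
 */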
1433
1434/**
1435 * [REP*] INSB/INSW/INSD
1436 * ES:EDI,DX[,ECX]
1437 *
1438 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1439 *
1440 * @returns Strict VBox status code. Informational status codes other than the one documented
1441 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1442 * @retval VINF_SUCCESS Success.
1443 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1444 * status code must be passed on to EM.
1445 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1446 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1447 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1448 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1449 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1450 *
1451 * @param pVM The virtual machine.
1452 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1453 * @param uPort IO Port
1454 * @param uPrefix IO instruction prefix
1455 * @param cbTransfer Size of transfer unit
1456 */
1457VMMDECL(int) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1458{
1459#ifdef VBOX_WITH_STATISTICS
1460 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
1461#endif
1462
1463 /*
1464 * We do not support REPNE or decrementing destination
1465 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
1466 */
1467 if ( (uPrefix & PREFIX_REPNE)
1468 || pRegFrame->eflags.Bits.u1DF)
1469 return VINF_EM_RAW_EMULATE_INSTR;
1470
1471 PVMCPU pVCpu = VMMGetCpu(pVM);
1472
1473 /*
1474 * Get bytes/words/dwords count to transfer.
1475 */
1476 RTGCUINTREG cTransfers = 1;
1477 if (uPrefix & PREFIX_REP)
1478 {
1479#ifndef IN_RC
1480 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
1481 && pRegFrame->rcx >= _4G)
1482 return VINF_EM_RAW_EMULATE_INSTR;
1483#endif
1484 cTransfers = pRegFrame->ecx;
1485
1486 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1487 cTransfers &= 0xffff;
1488
1489 if (!cTransfers)
1490 return VINF_SUCCESS;
1491 }
1492
1493 /* Convert destination address es:edi. */
1494 RTGCPTR GCPtrDst;
1495 int rc = SELMToFlatEx(pVM, DIS_SELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1496 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1497 &GCPtrDst);
1498 if (RT_FAILURE(rc))
1499 {
1500 Log(("INS destination address conversion failed -> fallback, rc=%d\n", rc));
1501 return VINF_EM_RAW_EMULATE_INSTR;
1502 }
1503
1504 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
1505 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
1506
1507 rc = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
1508 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1509 if (rc != VINF_SUCCESS)
1510 {
1511 Log(("INS will generate a trap -> fallback, rc=%d\n", rc));
1512 return VINF_EM_RAW_EMULATE_INSTR;
1513 }
1514
1515 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1516 if (cTransfers > 1)
1517 {
1518 /* If the device supports string transfers, ask it to do as
1519 * much as it wants. The rest is done with single-word transfers. */
1520 const RTGCUINTREG cTransfersOrg = cTransfers;
1521 rc = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
1522 AssertRC(rc); Assert(cTransfers <= cTransfersOrg);
1523 pRegFrame->rdi += (cTransfersOrg - cTransfers) * cbTransfer;
1524 }
1525
1526#ifdef IN_RC
1527 MMGCRamRegisterTrapHandler(pVM);
1528#endif
1529
1530 while (cTransfers && rc == VINF_SUCCESS)
1531 {
1532 uint32_t u32Value;
1533 rc = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
1534 if (!IOM_SUCCESS(rc))
1535 break;
1536 int rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
1537 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
1538 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
1539 pRegFrame->rdi += cbTransfer;
1540 cTransfers--;
1541 }
1542#ifdef IN_RC
1543 MMGCRamDeregisterTrapHandler(pVM);
1544#endif
1545
1546 /* Update ecx on exit. */
1547 if (uPrefix & PREFIX_REP)
1548 pRegFrame->ecx = cTransfers;
1549
1550 AssertMsg(rc == VINF_SUCCESS || rc == VINF_IOM_HC_IOPORT_READ || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST) || RT_FAILURE(rc), ("%Rrc\n", rc));
1551 return rc;
1552}
1553
1554
1555/**
1556 * [REP*] INSB/INSW/INSD
1557 * ES:EDI,DX[,ECX]
1558 *
1559 * @returns Strict VBox status code. Informational status codes other than the one documented
1560 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1561 * @retval VINF_SUCCESS Success.
1562 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1563 * status code must be passed on to EM.
1564 * @retval VINF_IOM_HC_IOPORT_READ Defer the read to ring-3. (R0/GC only)
1565 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
1566 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1567 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1568 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1569 *
1570 * @param pVM The virtual machine.
1571 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1572 * @param pCpu Disassembler CPU state.
1573 */
1574VMMDECL(int) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1575{
1576 /*
1577 * Get port number directly from the register (no need to bother the
1578 * disassembler). And get the I/O register size from the opcode / prefix.
1579 */
1580 RTIOPORT Port = pRegFrame->edx & 0xffff;
1581 unsigned cb = 0;
1582 if (pCpu->pCurInstr->opcode == OP_INSB)
1583 cb = 1;
1584 else
1585 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1586
1587 int rc = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1588 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1589 {
1590 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED || rc == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rc), ("%Rrc\n", rc));
1591 return rc;
1592 }
1593
1594 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1595}
1596
1597
1598/**
1599 * [REP*] OUTSB/OUTSW/OUTSD
1600 * DS:ESI,DX[,ECX]
1601 *
1602 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
1603 *
1604 * @returns Strict VBox status code. Informational status codes other than the one documented
1605 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1606 * @retval VINF_SUCCESS Success.
1607 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1608 * status code must be passed on to EM.
1609 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1610 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1611 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1612 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1613 *
1614 * @param pVM The virtual machine.
1615 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1616 * @param uPort IO Port
1617 * @param uPrefix IO instruction prefix
1618 * @param cbTransfer Size of transfer unit
1619 */
1620VMMDECL(int) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix, uint32_t cbTransfer)
1621{
1622#ifdef VBOX_WITH_STATISTICS
1623 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
1624#endif
1625
1626 /*
1627 * We do not support segment prefixes, REPNE or
1628 * decrementing source pointer.
1629 */
1630 if ( (uPrefix & (PREFIX_SEG | PREFIX_REPNE))
1631 || pRegFrame->eflags.Bits.u1DF)
1632 return VINF_EM_RAW_EMULATE_INSTR;
1633
1634 PVMCPU pVCpu = VMMGetCpu(pVM);
1635
1636 /*
1637 * Get bytes/words/dwords count to transfer.
1638 */
1639 RTGCUINTREG cTransfers = 1;
1640 if (uPrefix & PREFIX_REP)
1641 {
1642#ifndef IN_RC
1643 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
1644 && pRegFrame->rcx >= _4G)
1645 return VINF_EM_RAW_EMULATE_INSTR;
1646#endif
1647 cTransfers = pRegFrame->ecx;
1648 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == CPUMODE_16BIT)
1649 cTransfers &= 0xffff;
1650
1651 if (!cTransfers)
1652 return VINF_SUCCESS;
1653 }
1654
1655 /* Convert source address ds:esi. */
1656 RTGCPTR GCPtrSrc;
1657 int rc = SELMToFlatEx(pVM, DIS_SELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
1658 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1659 &GCPtrSrc);
1660 if (RT_FAILURE(rc))
1661 {
1662 Log(("OUTS source address conversion failed -> fallback, rc=%Rrc\n", rc));
1663 return VINF_EM_RAW_EMULATE_INSTR;
1664 }
1665
1666 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1667 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
1668 rc = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
1669 (cpl == 3) ? X86_PTE_US : 0);
1670 if (rc != VINF_SUCCESS)
1671 {
1672 Log(("OUTS will generate a trap -> fallback, rc=%Rrc\n", rc));
1673 return VINF_EM_RAW_EMULATE_INSTR;
1674 }
1675
1676 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
1677 if (cTransfers > 1)
1678 {
1679 /*
1680 * If the device supports string transfers, ask it to do as
1681 * much as it wants. The rest is done with single-word transfers.
1682 */
1683 const RTGCUINTREG cTransfersOrg = cTransfers;
1684 rc = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
1685 AssertRC(rc); Assert(cTransfers <= cTransfersOrg);
1686 pRegFrame->rsi += (cTransfersOrg - cTransfers) * cbTransfer;
1687 }
1688
1689#ifdef IN_RC
1690 MMGCRamRegisterTrapHandler(pVM);
1691#endif
1692
1693 while (cTransfers && rc == VINF_SUCCESS)
1694 {
1695 uint32_t u32Value;
1696 rc = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
1697 if (rc != VINF_SUCCESS)
1698 break;
1699 rc = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
1700 if (!IOM_SUCCESS(rc))
1701 break;
1702 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
1703 pRegFrame->rsi += cbTransfer;
1704 cTransfers--;
1705 }
1706
1707#ifdef IN_RC
1708 MMGCRamDeregisterTrapHandler(pVM);
1709#endif
1710
1711 /* Update ecx on exit. */
1712 if (uPrefix & PREFIX_REP)
1713 pRegFrame->ecx = cTransfers;
1714
1715 AssertMsg(rc == VINF_SUCCESS || rc == VINF_IOM_HC_IOPORT_WRITE || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST) || RT_FAILURE(rc), ("%Rrc\n", rc));
1716 return rc;
1717}
1718
1719
1720/**
1721 * [REP*] OUTSB/OUTSW/OUTSD
1722 * DS:ESI,DX[,ECX]
1723 *
1724 * @returns Strict VBox status code. Informational status codes other than the one documented
1725 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
1726 * @retval VINF_SUCCESS Success.
1727 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
1728 * status code must be passed on to EM.
1729 * @retval VINF_IOM_HC_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
1730 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
1731 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
1732 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
1733 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
1734 *
1735 * @param pVM The virtual machine.
1736 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
1737 * @param pCpu Disassembler CPU state.
1738 */
1739VMMDECL(int) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
1740{
1741 /*
1742 * Get port number from the first parameter.
1743 * And get the I/O register size from the opcode / prefix.
1744 */
1745 uint64_t Port = 0;
1746 unsigned cb = 0;
1747 bool fRc = iomGetRegImmData(pCpu, &pCpu->param1, pRegFrame, &Port, &cb);
1748 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
1749 if (pCpu->pCurInstr->opcode == OP_OUTSB)
1750 cb = 1;
1751 else
1752 cb = (pCpu->opmode == CPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
1753
1754 int rc = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
1755 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1756 {
1757 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED || rc == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rc), ("%Rrc\n", rc));
1758 return rc;
1759 }
1760
1761 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->prefix, cb);
1762}
1763
1764
1765#ifndef IN_RC
1766/**
1767 * Mapping an MMIO2 page in place of an MMIO page for direct access.
1768 *
1769 * (This is a special optimization used by the VGA device.)
1770 *
1771 * @returns VBox status code.
1772 *
1773 * @param pVM The virtual machine.
1774 * @param GCPhys The address of the MMIO page to be changed.
1775 * @param GCPhysRemapped The address of the MMIO2 page.
1776 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
1777 * for the time being.
1778 */
1779VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
1780{
1781 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
1782
1783 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
1784
1785 PVMCPU pVCpu = VMMGetCpu(pVM);
1786
1787 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1788 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
1789 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
1790 && !HWACCMIsNestedPagingActive(pVM)))
1791 return VINF_SUCCESS; /* ignore */
1792
1793 /*
1794 * Lookup the context range node the page belongs to.
1795 */
1796 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1797 AssertMsgReturn(pRange,
1798 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
1799 VERR_IOM_MMIO_RANGE_NOT_FOUND);
1800 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
1801 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1802
1803 /*
1804 * Do the aliasing; page align the addresses since PGM is picky.
1805 */
1806 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1807 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
1808
1809 int rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
1810 AssertRCReturn(rc, rc);
1811
1812 /*
1813 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
1814 * can simply prefetch it.
1815 *
1816 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
1817 */
1818#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
1819# ifdef VBOX_STRICT
1820 uint64_t fFlags;
1821 RTHCPHYS HCPhys;
1822 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1823 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1824# endif
1825#endif
1826 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
1827 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1828 return VINF_SUCCESS;
1829}
1830
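/*
 * Hypothetical caller sketch for the aliasing optimization above (the device and the
 * page addresses are assumptions; the flags value is the only one the function accepts):
 *
 *     rc = IOMMMIOMapMMIO2Page(pVM, GCPhysMMIOPage, GCPhysMMIO2Page, X86_PTE_RW | X86_PTE_P);
 *     ...
 *     rc = IOMMMIOResetRegion(pVM, GCPhysMMIOPage);   // restore the access handler later
 *
 * Without VT-x/AMD-V, or with guest paging active but no nested paging, both calls are
 * no-ops returning VINF_SUCCESS.
 */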
1831
1832/**
1833 * Reset a previously modified MMIO region; restore the access flags.
1834 *
1835 * @returns VBox status code.
1836 *
1837 * @param pVM The virtual machine.
1838 * @param GCPhys Physical address that's part of the MMIO region to be reset.
1839 */
1840VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
1841{
1842 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
1843
1844 PVMCPU pVCpu = VMMGetCpu(pVM);
1845
1846 /* This currently only works in real mode, protected mode without paging or with nested paging. */
1847 if ( !HWACCMIsEnabled(pVM) /* useless without VT-x/AMD-V */
1848 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
1849 && !HWACCMIsNestedPagingActive(pVM)))
1850 return VINF_SUCCESS; /* ignore */
1851
1852 /*
1853 * Lookup the context range node the page belongs to.
1854 */
1855 PIOMMMIORANGE pRange = iomMMIOGetRange(&pVM->iom.s, GCPhys);
1856 AssertMsgReturn(pRange,
1857 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys),
1858 VERR_IOM_MMIO_RANGE_NOT_FOUND);
1859
1860 /*
1861 * Call PGM to do the work.
1862 *
1863 * After the call, all the pages should be non-present... unless there is
1864 * a page pool flush pending (unlikely).
1865 */
1866 int rc = PGMHandlerPhysicalReset(pVM, pRange->GCPhys);
1867 AssertRC(rc);
1868
1869#ifdef VBOX_STRICT
1870 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
1871 {
1872 uint32_t cb = pRange->cb;
1873 GCPhys = pRange->GCPhys;
1874 while (cb)
1875 {
1876 uint64_t fFlags;
1877 RTHCPHYS HCPhys;
1878 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
1879 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1880 cb -= PAGE_SIZE;
1881 GCPhys += PAGE_SIZE;
1882 }
1883 }
1884#endif
1885 return rc;
1886}
1887#endif /* !IN_RC */
1888