VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 60309

Last change on this file since 60309 was 59073, checked in by vboxsync, 9 years ago

VMM: VINF_EM_DBG_EVENT and DBGFEventGenericWithArg implementation.

1/* $Id: IOMAllMMIO.cpp 59073 2015-12-10 12:48:03Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#include <VBox/vmm/iem.h>
32#include "IOMInternal.h"
33#include <VBox/vmm/vm.h>
34#include <VBox/vmm/vmm.h>
35#include <VBox/vmm/hm.h>
36#include "IOMInline.h"
37
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <iprt/assert.h>
44#include <VBox/log.h>
45#include <iprt/asm.h>
46#include <iprt/string.h>
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52/** @def IEM_USE_IEM_INSTEAD
53 * Use IEM instead of IOM for interpreting MMIO accesses.
54 * Because of PATM/CSAM issues in raw-mode, we've split this up into the 2nd and 3rd
55 * IEM deployment steps. */
56#if ((defined(IN_RING3) || defined(IN_RING0)) && defined(VBOX_WITH_2ND_IEM_STEP)) \
57 || defined(VBOX_WITH_3RD_IEM_STEP) || defined(DOXYGEN_RUNNING)
58# define IEM_USE_IEM_INSTEAD
59#endif
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
65
66/**
67 * Array for fast conversion of the operand size (1/2/4/8 bytes) to the corresponding bit shift value.
68 */
69static const unsigned g_aSize2Shift[] =
70{
71 ~0U, /* 0 - invalid */
72 0, /* *1 == 2^0 */
73 1, /* *2 == 2^1 */
74 ~0U, /* 3 - invalid */
75 2, /* *4 == 2^2 */
76 ~0U, /* 5 - invalid */
77 ~0U, /* 6 - invalid */
78 ~0U, /* 7 - invalid */
79 3 /* *8 == 2^3 */
80};
81
82/**
83 * Macro for fast conversion of the operand size (1/2/4/8 bytes) to the corresponding bit shift value.
84 */
85#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
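
/*
 * Illustrative note (added, not part of the original file): SIZE_2_SHIFT()
 * turns a multiply into a shift, e.g. SIZE_2_SHIFT(4) == 2, so the REP
 * statistics code below can compute byte counts as:
 *
 *     cbTotal = cTransfers << SIZE_2_SHIFT(cb);   // == cTransfers * cb
 *
 * The macro assumes cb is a valid operand size (1/2/4/8); any other value
 * indexes one of the ~0U entries and yields a nonsense shift count.
 */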
86
87
88/**
89 * Returns the contents of a register or the immediate data of an instruction's parameter.
90 *
91 * @returns true on success.
92 *
93 * @todo Get rid of this code. Use DISQueryParamVal instead
94 *
95 * @param pCpu Pointer to current disassembler context.
96 * @param pParam Pointer to parameter of instruction to process.
97 * @param pRegFrame Pointer to CPUMCTXCORE guest structure.
98 * @param pu64Data Where to store retrieved data.
99 * @param pcbSize Where to store the size of data (1, 2, 4, 8).
100 */
101bool iomGetRegImmData(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t *pu64Data, unsigned *pcbSize)
102{
103 NOREF(pCpu);
104 if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32))
105 {
106 *pcbSize = 0;
107 *pu64Data = 0;
108 return false;
109 }
110
111 /* divide and conquer */
112 if (pParam->fUse & (DISUSE_REG_GEN64 | DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8))
113 {
114 if (pParam->fUse & DISUSE_REG_GEN32)
115 {
116 *pcbSize = 4;
117 DISFetchReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t *)pu64Data);
118 return true;
119 }
120
121 if (pParam->fUse & DISUSE_REG_GEN16)
122 {
123 *pcbSize = 2;
124 DISFetchReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t *)pu64Data);
125 return true;
126 }
127
128 if (pParam->fUse & DISUSE_REG_GEN8)
129 {
130 *pcbSize = 1;
131 DISFetchReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t *)pu64Data);
132 return true;
133 }
134
135 Assert(pParam->fUse & DISUSE_REG_GEN64);
136 *pcbSize = 8;
137 DISFetchReg64(pRegFrame, pParam->Base.idxGenReg, pu64Data);
138 return true;
139 }
140 else
141 {
142 if (pParam->fUse & (DISUSE_IMMEDIATE64 | DISUSE_IMMEDIATE64_SX8))
143 {
144 *pcbSize = 8;
145 *pu64Data = pParam->uValue;
146 return true;
147 }
148
149 if (pParam->fUse & (DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8))
150 {
151 *pcbSize = 4;
152 *pu64Data = (uint32_t)pParam->uValue;
153 return true;
154 }
155
156 if (pParam->fUse & (DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE16_SX8))
157 {
158 *pcbSize = 2;
159 *pu64Data = (uint16_t)pParam->uValue;
160 return true;
161 }
162
163 if (pParam->fUse & DISUSE_IMMEDIATE8)
164 {
165 *pcbSize = 1;
166 *pu64Data = (uint8_t)pParam->uValue;
167 return true;
168 }
169
170 if (pParam->fUse & DISUSE_REG_SEG)
171 {
172 *pcbSize = 2;
173 DISFetchRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL *)pu64Data);
174 return true;
175 } /* Else - error. */
176
177 AssertFailed();
178 *pcbSize = 0;
179 *pu64Data = 0;
180 return false;
181 }
182}
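
/*
 * Usage sketch (illustrative only; the real callers are the iomInterpret*
 * workers further down): fetching the source operand of a faulting MOV and
 * forwarding it to the MMIO write path.
 *
 *     unsigned cb      = 0;
 *     uint64_t u64Data = 0;
 *     if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb))
 *         rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
 */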
183
184
185/**
186 * Saves data to an 8/16/32/64-bit general purpose or segment register defined by
187 * the instruction's parameter.
188 *
189 * @returns true on success.
190 * @param pCpu Pointer to current disassembler context.
191 * @param pParam Pointer to parameter of instruction to process.
192 * @param pRegFrame Pointer to CPUMCTXCORE guest structure.
193 * @param u64Data 8/16/32/64 bit data to store.
194 */
195bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t u64Data)
196{
197 NOREF(pCpu);
198 if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32 | DISUSE_DISPLACEMENT64 | DISUSE_IMMEDIATE8 | DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE16_SX8))
199 {
200 return false;
201 }
202
203 if (pParam->fUse & DISUSE_REG_GEN32)
204 {
205 DISWriteReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t)u64Data);
206 return true;
207 }
208
209 if (pParam->fUse & DISUSE_REG_GEN64)
210 {
211 DISWriteReg64(pRegFrame, pParam->Base.idxGenReg, u64Data);
212 return true;
213 }
214
215 if (pParam->fUse & DISUSE_REG_GEN16)
216 {
217 DISWriteReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t)u64Data);
218 return true;
219 }
220
221 if (pParam->fUse & DISUSE_REG_GEN8)
222 {
223 DISWriteReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t)u64Data);
224 return true;
225 }
226
227 if (pParam->fUse & DISUSE_REG_SEG)
228 {
229 DISWriteRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL)u64Data);
230 return true;
231 }
232
233 /* Else - error. */
234 return false;
235}
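
/*
 * Usage sketch (illustrative): the read path stores the value fetched from
 * the device into the destination operand, cf. iomInterpretMOVxXRead below.
 *
 *     bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
 *     AssertMsg(fRc, ("Failed to store register value!\n"));
 */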
236
237
238/**
239 * Deals with complicated MMIO writes.
240 *
241 * Complicated means unaligned or non-dword/qword sized accesses depending on
242 * the MMIO region's access mode flags.
243 *
244 * @returns Strict VBox status code. Any EM scheduling status code,
245 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
246 * VINF_IOM_R3_MMIO_READ may be returned.
247 *
248 * @param pVM The cross context VM structure.
249 * @param pRange The range to write to.
250 * @param GCPhys The physical address to start writing.
251 * @param pvValue The value to write.
252 * @param cbValue The size of the value to write.
253 */
254static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
255{
256 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
257 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
258 VERR_IOM_MMIO_IPE_1);
259 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
260 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
261 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
262 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;
263
264 /*
265 * Do debug stop if requested.
266 */
267 int rc = VINF_SUCCESS; NOREF(pVM);
268#ifdef VBOX_STRICT
269 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
270 {
271# ifdef IN_RING3
272 LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
273 R3STRING(pRange->pszDesc)));
274 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
275 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
276 if (rc == VERR_DBGF_NOT_ATTACHED)
277 rc = VINF_SUCCESS;
278# else
279 return VINF_IOM_R3_MMIO_WRITE;
280# endif
281 }
282#endif
283
284 /*
285 * Check if we should ignore the write.
286 */
287 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
288 {
289 Assert(cbValue != 4 || (GCPhys & 3));
290 return VINF_SUCCESS;
291 }
292 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
293 {
294 Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
295 return VINF_SUCCESS;
296 }
297
298 /*
299 * Split and conquer.
300 */
301 for (;;)
302 {
303 unsigned const offAccess = GCPhys & 3;
304 unsigned cbThisPart = 4 - offAccess;
305 if (cbThisPart > cbValue)
306 cbThisPart = cbValue;
307
308 /*
309 * Get the missing bits (if any).
310 */
311 uint32_t u32MissingValue = 0;
312 if (fReadMissing && cbThisPart != 4)
313 {
314 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
315 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
316 switch (rc2)
317 {
318 case VINF_SUCCESS:
319 break;
320 case VINF_IOM_MMIO_UNUSED_FF:
321 u32MissingValue = UINT32_C(0xffffffff);
322 break;
323 case VINF_IOM_MMIO_UNUSED_00:
324 u32MissingValue = 0;
325 break;
326 case VINF_IOM_R3_MMIO_READ:
327 case VINF_IOM_R3_MMIO_READ_WRITE:
328 case VINF_IOM_R3_MMIO_WRITE:
329 /** @todo What if we've split a transfer and already read
330 * something? Since writes generally have side effects we
331 * could be kind of screwed here...
332 *
333 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
334 * to REM for MMIO accesses (like we may currently do). */
335
336 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
337 return rc2;
338 default:
339 if (RT_FAILURE(rc2))
340 {
341 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
342 return rc2;
343 }
344 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
345 if (rc == VINF_SUCCESS || rc2 < rc)
346 rc = rc2;
347 break;
348 }
349 }
350
351 /*
352 * Merge missing and given bits.
353 */
354 uint32_t u32GivenMask;
355 uint32_t u32GivenValue;
356 switch (cbThisPart)
357 {
358 case 1:
359 u32GivenValue = *(uint8_t const *)pvValue;
360 u32GivenMask = UINT32_C(0x000000ff);
361 break;
362 case 2:
363 u32GivenValue = *(uint16_t const *)pvValue;
364 u32GivenMask = UINT32_C(0x0000ffff);
365 break;
366 case 3:
367 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
368 ((uint8_t const *)pvValue)[2], 0);
369 u32GivenMask = UINT32_C(0x00ffffff);
370 break;
371 case 4:
372 u32GivenValue = *(uint32_t const *)pvValue;
373 u32GivenMask = UINT32_C(0xffffffff);
374 break;
375 default:
376 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
377 }
378 if (offAccess)
379 {
380 u32GivenValue <<= offAccess * 8;
381 u32GivenMask <<= offAccess * 8;
382 }
383
384 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
385 | (u32GivenValue & u32GivenMask);
386
387 /*
388 * Do DWORD write to the device.
389 */
390 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
391 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
392 switch (rc2)
393 {
394 case VINF_SUCCESS:
395 break;
396 case VINF_IOM_R3_MMIO_READ:
397 case VINF_IOM_R3_MMIO_READ_WRITE:
398 case VINF_IOM_R3_MMIO_WRITE:
399 /** @todo What if we've split a transfer and already read
400 * something? Since reads can have side effects we could be
401 * kind of screwed here...
402 *
403 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
404 * to REM for MMIO accesses (like we may currently do). */
405 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
406 return rc2;
407 default:
408 if (RT_FAILURE(rc2))
409 {
410 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
411 return rc2;
412 }
413 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
414 if (rc == VINF_SUCCESS || rc2 < rc)
415 rc = rc2;
416 break;
417 }
418
419 /*
420 * Advance.
421 */
422 cbValue -= cbThisPart;
423 if (!cbValue)
424 break;
425 GCPhys += cbThisPart;
426 pvValue = (uint8_t const *)pvValue + cbThisPart;
427 }
428
429 return rc;
430}
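
/*
 * Worked example (illustrative, added for clarity): a 4-byte write of
 * 0xAABBCCDD at an address with (GCPhys & 3) == 2 is carved into two aligned
 * dword transactions:
 *
 *   pass 1: offAccess=2, cbThisPart=2, u32GivenMask=0xffff0000,
 *           u32GivenValue=0xCCDD0000, merged with the (optionally read-back)
 *           missing low word;
 *   pass 2: offAccess=0, cbThisPart=2, u32GivenMask=0x0000ffff,
 *           u32GivenValue=0x0000AABB, merged with the missing high word.
 *
 * Each pass issues exactly one aligned dword write via pfnWriteCallback.
 */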
431
432
433
434
435/**
436 * Wrapper which does the write and updates range statistics when such are enabled.
437 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
438 */
439static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
440 const void *pvData, unsigned cb)
441{
442#ifdef VBOX_WITH_STATISTICS
443 int rcSem = IOM_LOCK_SHARED(pVM);
444 if (rcSem == VERR_SEM_BUSY)
445 return VINF_IOM_R3_MMIO_WRITE;
446 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
447 if (!pStats)
448# ifdef IN_RING3
449 return VERR_NO_MEMORY;
450# else
451 return VINF_IOM_R3_MMIO_WRITE;
452# endif
453 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
454#else
455 NOREF(pVCpu);
456#endif
457
458 VBOXSTRICTRC rcStrict;
459 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
460 {
461 if ( (cb == 4 && !(GCPhysFault & 3))
462 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
463 || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
464 rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
465 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
466 else
467 rcStrict = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
468 }
469 else
470 rcStrict = VINF_SUCCESS;
471
472 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
473 STAM_COUNTER_INC(&pStats->Accesses);
474 return rcStrict;
475}
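
/*
 * Caller note (illustrative): because VINF_IOM_R3_MMIO_WRITE is a success
 * status, callers must test against VINF_SUCCESS rather than use RT_SUCCESS()
 * to know whether the write actually completed, as the REP STOS loop below
 * does:
 *
 *     rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb));
 *     if (rc != VINF_SUCCESS)    // not RT_SUCCESS(rc)!
 *         break;                 // defer to ring-3 / propagate the status
 */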
476
477
478/**
479 * Deals with complicated MMIO reads.
480 *
481 * Complicated means unaligned or non-dword/qword sized accesses depending on
482 * the MMIO region's access mode flags.
483 *
484 * @returns Strict VBox status code. Any EM scheduling status code,
485 * VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
486 * VINF_IOM_R3_MMIO_WRITE may be returned.
487 *
488 * @param pVM The cross context VM structure.
489 * @param pRange The range to read from.
490 * @param GCPhys The physical address to start reading.
491 * @param pvValue Where to store the value.
492 * @param cbValue The size of the value to read.
493 */
494static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
495{
496 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
497 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
498 VERR_IOM_MMIO_IPE_1);
499 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
500 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
501
502 /*
503 * Do debug stop if requested.
504 */
505 int rc = VINF_SUCCESS; NOREF(pVM);
506#ifdef VBOX_STRICT
507 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
508 {
509# ifdef IN_RING3
510 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
511 "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
512 if (rc == VERR_DBGF_NOT_ATTACHED)
513 rc = VINF_SUCCESS;
514# else
515 return VINF_IOM_R3_MMIO_READ;
516# endif
517 }
518#endif
519
520 /*
521 * Split and conquer.
522 */
523 for (;;)
524 {
525 /*
526 * Do DWORD read from the device.
527 */
528 uint32_t u32Value;
529 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
530 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
531 switch (rc2)
532 {
533 case VINF_SUCCESS:
534 break;
535 case VINF_IOM_MMIO_UNUSED_FF:
536 u32Value = UINT32_C(0xffffffff);
537 break;
538 case VINF_IOM_MMIO_UNUSED_00:
539 u32Value = 0;
540 break;
541 case VINF_IOM_R3_MMIO_READ:
542 case VINF_IOM_R3_MMIO_READ_WRITE:
543 case VINF_IOM_R3_MMIO_WRITE:
544 /** @todo What if we've split a transfer and already read
545 * something? Since reads can have side effects we could be
546 * kind of screwed here... */
547 LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
548 return rc2;
549 default:
550 if (RT_FAILURE(rc2))
551 {
552 Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
553 return rc2;
554 }
555 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
556 if (rc == VINF_SUCCESS || rc2 < rc)
557 rc = rc2;
558 break;
559 }
560 u32Value >>= (GCPhys & 3) * 8;
561
562 /*
563 * Write what we've read.
564 */
565 unsigned cbThisPart = 4 - (GCPhys & 3);
566 if (cbThisPart > cbValue)
567 cbThisPart = cbValue;
568
569 switch (cbThisPart)
570 {
571 case 1:
572 *(uint8_t *)pvValue = (uint8_t)u32Value;
573 break;
574 case 2:
575 *(uint16_t *)pvValue = (uint16_t)u32Value;
576 break;
577 case 3:
578 ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
579 ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
580 ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
581 break;
582 case 4:
583 *(uint32_t *)pvValue = u32Value;
584 break;
585 }
586
587 /*
588 * Advance.
589 */
590 cbValue -= cbThisPart;
591 if (!cbValue)
592 break;
593 GCPhys += cbThisPart;
594 pvValue = (uint8_t *)pvValue + cbThisPart;
595 }
596
597 return rc;
598}
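
/*
 * Worked example (illustrative): a 2-byte read at an address with
 * (GCPhys & 3) == 3 becomes two aligned dword reads:
 *
 *   pass 1: read the dword at GCPhys & ~3, shift right by 24, store 1 byte;
 *   pass 2: read the following dword, shift right by 0, store the last byte.
 */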
599
600
601/**
602 * Implements VINF_IOM_MMIO_UNUSED_FF.
603 *
604 * @returns VINF_SUCCESS.
605 * @param pvValue Where to store the 0xFF bytes.
606 * @param cbValue How many bytes to read.
607 */
608static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
609{
610 switch (cbValue)
611 {
612 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
613 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
614 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
615 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
616 default:
617 {
618 uint8_t *pb = (uint8_t *)pvValue;
619 while (cbValue--)
620 *pb++ = UINT8_C(0xff);
621 break;
622 }
623 }
624 return VINF_SUCCESS;
625}
626
627
628/**
629 * Implements VINF_IOM_MMIO_UNUSED_00.
630 *
631 * @returns VINF_SUCCESS.
632 * @param pvValue Where to store the zeros.
633 * @param cbValue How many bytes to read.
634 */
635static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
636{
637 switch (cbValue)
638 {
639 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
640 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
641 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
642 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
643 default:
644 {
645 uint8_t *pb = (uint8_t *)pvValue;
646 while (cbValue--)
647 *pb++ = UINT8_C(0x00);
648 break;
649 }
650 }
651 return VINF_SUCCESS;
652}
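
/*
 * Background (illustrative): these two helpers realize the
 * VINF_IOM_MMIO_UNUSED_FF/_00 contract. A device read callback (or a missing
 * one) may return either status instead of filling the buffer, and
 * iomMMIODoRead() below then synthesizes the bytes:
 *
 *     case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
 *     case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
 */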
653
654
655/**
656 * Wrapper which does the read and updates range statistics when such are enabled.
657 */
658DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
659 void *pvValue, unsigned cbValue)
660{
661#ifdef VBOX_WITH_STATISTICS
662 int rcSem = IOM_LOCK_SHARED(pVM);
663 if (rcSem == VERR_SEM_BUSY)
664 return VINF_IOM_R3_MMIO_READ;
665 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
666 if (!pStats)
667# ifdef IN_RING3
668 return VERR_NO_MEMORY;
669# else
670 return VINF_IOM_R3_MMIO_READ;
671# endif
672 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
673#else
674 NOREF(pVCpu);
675#endif
676
677 VBOXSTRICTRC rcStrict;
678 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
679 {
680 if ( ( cbValue == 4
681 && !(GCPhys & 3))
682 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
683 || ( cbValue == 8
684 && !(GCPhys & 7)
685 && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
686 rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
687 pvValue, cbValue);
688 else
689 rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
690 }
691 else
692 rcStrict = VINF_IOM_MMIO_UNUSED_FF;
693 if (rcStrict != VINF_SUCCESS)
694 {
695 switch (VBOXSTRICTRC_VAL(rcStrict))
696 {
697 case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
698 case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
699 }
700 }
701
702 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
703 STAM_COUNTER_INC(&pStats->Accesses);
704 return rcStrict;
705}
706
707
708/**
709 * Internal - statistics only.
710 */
711DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
712{
713#ifdef VBOX_WITH_STATISTICS
714 switch (cb)
715 {
716 case 1:
717 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
718 break;
719 case 2:
720 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
721 break;
722 case 4:
723 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
724 break;
725 case 8:
726 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
727 break;
728 default:
729 /* No way. */
730 AssertMsgFailed(("Invalid data length %d\n", cb));
731 break;
732 }
733#else
734 NOREF(pVM); NOREF(cb);
735#endif
736}
737
738
739#ifndef IEM_USE_IEM_INSTEAD
740
741/**
742 * MOV reg, mem (read)
743 * MOVZX reg, mem (read)
744 * MOVSX reg, mem (read)
745 *
746 * @returns VBox status code.
747 *
748 * @param pVM The cross context VM structure.
749 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
750 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
751 * @param pCpu Disassembler CPU state.
752 * @param pRange Pointer to the MMIO range.
753 * @param GCPhysFault The GC physical address corresponding to pvFault.
754 */
755static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
756 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
757{
758 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
759
760 /*
761 * Get the data size from parameter 2,
762 * and call the handler function to get the data.
763 */
764 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
765 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
766
767 uint64_t u64Data = 0;
768 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
769 if (rc == VINF_SUCCESS)
770 {
771 /*
772 * Do sign extension for MOVSX.
773 */
774 /** @todo checkup MOVSX implementation! */
775 if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
776 {
777 if (cb == 1)
778 {
779 /* Sign extend from byte. */
780 int64_t iData = (int8_t)u64Data;
781 u64Data = (uint64_t)iData;
782 }
783 else
784 {
785 /* Sign extend from word. */
786 int64_t iData = (int16_t)u64Data;
787 u64Data = (uint64_t)iData;
788 }
789 }
790
791 /*
792 * Store the result to register (parameter 1).
793 */
794 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
795 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
796 }
797
798 if (rc == VINF_SUCCESS)
799 iomMMIOStatLength(pVM, cb);
800 return rc;
801}
802
803
804/**
805 * MOV mem, reg|imm (write)
806 *
807 * @returns VBox status code.
808 *
809 * @param pVM The cross context VM structure.
810 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
811 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
812 * @param pCpu Disassembler CPU state.
813 * @param pRange Pointer to the MMIO range.
814 * @param GCPhysFault The GC physical address corresponding to pvFault.
815 */
816static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
817 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
818{
819 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
820
821 /*
822 * Get data to write from second parameter,
823 * and call the callback to write it.
824 */
825 unsigned cb = 0;
826 uint64_t u64Data = 0;
827 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
828 AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);
829
830 int rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
831 if (rc == VINF_SUCCESS)
832 iomMMIOStatLength(pVM, cb);
833 return rc;
834}
835
836
837/** Wrapper for reading virtual memory. */
838DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
839{
840 /* Note: This will fail in R0 or RC if it hits an access handler. That
841 isn't a problem though since the operation can be restarted in REM. */
842#ifdef IN_RC
843 NOREF(pVCpu);
844 int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
845 /* Page may be protected and not directly accessible. */
846 if (rc == VERR_ACCESS_DENIED)
847 rc = VINF_IOM_R3_IOPORT_WRITE;
848 return rc;
849#else
850 return VBOXSTRICTRC_VAL(PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb, PGMACCESSORIGIN_IOM));
851#endif
852}
853
854
855/** Wrapper for writing virtual memory. */
856DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
857{
858 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
859 * raw mode code. Some thought needs to be spent on theoretical concurrency issues
860 * as well, since we're not behind the PGM lock and the handler may change between calls.
861 *
862 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
863 * the state of some shadowed structures. */
864#if defined(IN_RING0) || defined(IN_RC)
865 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
866#else
867 NOREF(pCtxCore);
868 return VBOXSTRICTRC_VAL(PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IOM));
869#endif
870}
871
872
873#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
874/**
875 * [REP] MOVSB
876 * [REP] MOVSW
877 * [REP] MOVSD
878 *
879 * Restricted implementation.
880 *
881 *
882 * @returns VBox status code.
883 *
884 * @param pVM The cross context VM structure.
885 * @param fWriteAccess Whether the access is a write (true) or a read (false).
886 * @param pRegFrame Trap register frame.
887 * @param GCPhysFault The GC physical address corresponding to pvFault.
888 * @param pCpu Disassembler CPU state.
889 * @param pRange Pointer to the MMIO range.
890 * @param ppStat Which sub-sample to attribute this call to.
891 */
892static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
893 PSTAMPROFILE *ppStat)
894{
895 /*
896 * We do not support segment prefixes or REPNE.
897 */
898 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
899 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
900
901 PVMCPU pVCpu = VMMGetCpu(pVM);
902
903 /*
904 * Get bytes/words/dwords/qword count to copy.
905 */
906 uint32_t cTransfers = 1;
907 if (pCpu->fPrefix & DISPREFIX_REP)
908 {
909#ifndef IN_RC
910 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
911 && pRegFrame->rcx >= _4G)
912 return VINF_EM_RAW_EMULATE_INSTR;
913#endif
914
915 cTransfers = pRegFrame->ecx;
916 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
917 cTransfers &= 0xffff;
918
919 if (!cTransfers)
920 return VINF_SUCCESS;
921 }
922
923 /* Get the current privilege level. */
924 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
925
926 /*
927 * Get data size.
928 */
929 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
930 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
931 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
932
933#ifdef VBOX_WITH_STATISTICS
934 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
935 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
936#endif
937
938/** @todo re-evaluate on page boundaries. */
939
940 RTGCPHYS Phys = GCPhysFault;
941 int rc;
942 if (fWriteAccess)
943 {
944 /*
945 * Write operation: [Mem] -> [MMIO]
946 * ds:esi (Virt Src) -> es:edi (Phys Dst)
947 */
948 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
949
950 /* Check callback. */
951 if (!pRange->CTX_SUFF(pfnWriteCallback))
952 return VINF_IOM_R3_MMIO_WRITE;
953
954 /* Convert source address ds:esi. */
955 RTGCUINTPTR pu8Virt;
956 rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
957 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
958 (PRTGCPTR)&pu8Virt);
959 if (RT_SUCCESS(rc))
960 {
961
962 /* Access verification first; we currently can't recover properly from traps inside this instruction */
963 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
964 if (rc != VINF_SUCCESS)
965 {
966 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
967 return VINF_EM_RAW_EMULATE_INSTR;
968 }
969
970#ifdef IN_RC
971 MMGCRamRegisterTrapHandler(pVM);
972#endif
973
974 /* copy loop. */
975 while (cTransfers)
976 {
977 uint32_t u32Data = 0;
978 rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
979 if (rc != VINF_SUCCESS)
980 break;
981 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb));
982 if (rc != VINF_SUCCESS)
983 break;
984
985 pu8Virt += offIncrement;
986 Phys += offIncrement;
987 pRegFrame->rsi += offIncrement;
988 pRegFrame->rdi += offIncrement;
989 cTransfers--;
990 }
991#ifdef IN_RC
992 MMGCRamDeregisterTrapHandler(pVM);
993#endif
994 /* Update ecx. */
995 if (pCpu->fPrefix & DISPREFIX_REP)
996 pRegFrame->ecx = cTransfers;
997 }
998 else
999 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1000 }
1001 else
1002 {
1003 /*
1004 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
1005 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
1006 */
1007 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
1008
1009 /* Check callback. */
1010 if (!pRange->CTX_SUFF(pfnReadCallback))
1011 return VINF_IOM_R3_MMIO_READ;
1012
1013 /* Convert destination address. */
1014 RTGCUINTPTR pu8Virt;
1015 rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
1016 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
1017 (RTGCPTR *)&pu8Virt);
1018 if (RT_FAILURE(rc))
1019 return VINF_IOM_R3_MMIO_READ;
1020
1021 /* Check if destination address is MMIO. */
1022 PIOMMMIORANGE pMMIODst;
1023 RTGCPHYS PhysDst;
1024 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
1025 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
1026 if ( RT_SUCCESS(rc)
1027 && (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
1028 {
1029 /** @todo implement per-device locks for MMIO access. */
1030 Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
1031
1032 /*
1033 * Extra: [MMIO] -> [MMIO]
1034 */
1035 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
1036 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
1037 {
1038 iomMmioReleaseRange(pVM, pRange);
1039 return VINF_IOM_R3_MMIO_READ_WRITE;
1040 }
1041
1042 /* copy loop. */
1043 while (cTransfers)
1044 {
1045 uint32_t u32Data;
1046 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
1047 if (rc != VINF_SUCCESS)
1048 break;
1049 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb));
1050 if (rc != VINF_SUCCESS)
1051 break;
1052
1053 Phys += offIncrement;
1054 PhysDst += offIncrement;
1055 pRegFrame->rsi += offIncrement;
1056 pRegFrame->rdi += offIncrement;
1057 cTransfers--;
1058 }
1059 iomMmioReleaseRange(pVM, pRange);
1060 }
1061 else
1062 {
1063 /*
1064 * Normal: [MMIO] -> [Mem]
1065 */
1066 /* Access verification first; we currently can't recover properly from traps inside this instruction */
1067 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
1068 if (rc != VINF_SUCCESS)
1069 {
1070 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
1071 return VINF_EM_RAW_EMULATE_INSTR;
1072 }
1073
1074 /* copy loop. */
1075#ifdef IN_RC
1076 MMGCRamRegisterTrapHandler(pVM);
1077#endif
1078 while (cTransfers)
1079 {
1080 uint32_t u32Data;
1081 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
1082 if (rc != VINF_SUCCESS)
1083 break;
1084 rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
1085 if (rc != VINF_SUCCESS)
1086 {
1087 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
1088 break;
1089 }
1090
1091 pu8Virt += offIncrement;
1092 Phys += offIncrement;
1093 pRegFrame->rsi += offIncrement;
1094 pRegFrame->rdi += offIncrement;
1095 cTransfers--;
1096 }
1097#ifdef IN_RC
1098 MMGCRamDeregisterTrapHandler(pVM);
1099#endif
1100 }
1101
1102 /* Update ecx on exit. */
1103 if (pCpu->fPrefix & DISPREFIX_REP)
1104 pRegFrame->ecx = cTransfers;
1105 }
1106
1107 /* work statistics. */
1108 if (rc == VINF_SUCCESS)
1109 iomMMIOStatLength(pVM, cb);
1110 NOREF(ppStat);
1111 return rc;
1112}
1113#endif /* IOM_WITH_MOVS_SUPPORT */
1114
1115
1116/**
1117 * Gets the address / opcode mask corresponding to the given CPU mode.
1118 *
1119 * @returns Mask.
1120 * @param enmCpuMode CPU mode.
1121 */
1122static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
1123{
1124 switch (enmCpuMode)
1125 {
1126 case DISCPUMODE_16BIT: return UINT16_MAX;
1127 case DISCPUMODE_32BIT: return UINT32_MAX;
1128 case DISCPUMODE_64BIT: return UINT64_MAX;
1129 default:
1130 AssertFailedReturn(UINT32_MAX);
1131 }
1132}
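
/*
 * Usage sketch (illustrative, hypothetical values): the mask confines
 * pointer/counter updates to the instruction's address size while preserving
 * the upper register bits. For a 16-bit STOSW with DF clear:
 *
 *     uint64_t fAddrMask = iomDisModeToMask(DISCPUMODE_16BIT);   // 0xffff
 *     pRegFrame->rdi = ((pRegFrame->rdi + 2) & fAddrMask)
 *                    | (pRegFrame->rdi & ~fAddrMask);            // only DI wraps
 */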
1133
1134
1135/**
1136 * [REP] STOSB
1137 * [REP] STOSW
1138 * [REP] STOSD
1139 *
1140 * Restricted implementation.
1141 *
1142 *
1143 * @returns VBox status code.
1144 *
1145 * @param pVM The cross context VM structure.
1146 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1147 * @param pRegFrame Trap register frame.
1148 * @param GCPhysFault The GC physical address corresponding to pvFault.
1149 * @param pCpu Disassembler CPU state.
1150 * @param pRange Pointer to the MMIO range.
1151 */
1152static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
1153 PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1154{
1155 /*
1156 * We do not support segment prefixes or REPNE..
1157 */
1158 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
1159 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1160
1161 /*
1162 * Get bytes/words/dwords/qwords count to copy.
1163 */
1164 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1165 RTGCUINTREG cTransfers = 1;
1166 if (pCpu->fPrefix & DISPREFIX_REP)
1167 {
1168#ifndef IN_RC
1169 if ( CPUMIsGuestIn64BitCode(pVCpu)
1170 && pRegFrame->rcx >= _4G)
1171 return VINF_EM_RAW_EMULATE_INSTR;
1172#endif
1173
1174 cTransfers = pRegFrame->rcx & fAddrMask;
1175 if (!cTransfers)
1176 return VINF_SUCCESS;
1177 }
1178
1179/** @todo r=bird: bounds checks! */
1180
1181 /*
1182 * Get data size.
1183 */
1184 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
1185 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1186 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1187
1188#ifdef VBOX_WITH_STATISTICS
1189 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
1190 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
1191#endif
1192
1193
1194 RTGCPHYS Phys = GCPhysFault;
1195 int rc;
1196 if ( pRange->CTX_SUFF(pfnFillCallback)
1197 && cb <= 4 /* can only fill 32-bit values */)
1198 {
1199 /*
1200 * Use the fill callback.
1201 */
1202 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
1203 if (offIncrement > 0)
1204 {
1205 /* addr++ variant. */
1206 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
1207 pRegFrame->eax, cb, cTransfers);
1208 if (rc == VINF_SUCCESS)
1209 {
1210 /* Update registers. */
1211 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1212 | (pRegFrame->rdi & ~fAddrMask);
1213 if (pCpu->fPrefix & DISPREFIX_REP)
1214 pRegFrame->rcx &= ~fAddrMask;
1215 }
1216 }
1217 else
1218 {
1219 /* addr-- variant. */
1220 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1221 Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
1222 pRegFrame->eax, cb, cTransfers);
1223 if (rc == VINF_SUCCESS)
1224 {
1225 /* Update registers. */
1226 pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1227 | (pRegFrame->rdi & ~fAddrMask);
1228 if (pCpu->fPrefix & DISPREFIX_REP)
1229 pRegFrame->rcx &= ~fAddrMask;
1230 }
1231 }
1232 }
1233 else
1234 {
1235 /*
1236 * Use the write callback.
1237 */
1238 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
1239 uint64_t u64Data = pRegFrame->rax;
1240
1241 /* fill loop. */
1242 do
1243 {
1244 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb));
1245 if (rc != VINF_SUCCESS)
1246 break;
1247
1248 Phys += offIncrement;
1249 pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
1250 | (pRegFrame->rdi & ~fAddrMask);
1251 cTransfers--;
1252 } while (cTransfers);
1253
1254 /* Update rcx on exit. */
1255 if (pCpu->fPrefix & DISPREFIX_REP)
1256 pRegFrame->rcx = (cTransfers & fAddrMask)
1257 | (pRegFrame->rcx & ~fAddrMask);
1258 }
1259
1260 /*
1261 * Work statistics and return.
1262 */
1263 if (rc == VINF_SUCCESS)
1264 iomMMIOStatLength(pVM, cb);
1265 return rc;
1266}
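
/*
 * Sketch of a fill handler (illustrative, inferred from how pfnFillCallback
 * is invoked above; devFill and its parameter names are hypothetical): the
 * callback receives one item value plus a repeat count, so a device can
 * satisfy an entire REP STOS in a single call instead of per-item writes.
 *
 *     static DECLCALLBACK(int) devFill(PPDMDEVINS pDevIns, void *pvUser,
 *                                      RTGCPHYS GCPhysAddr, uint32_t u32Item,
 *                                      unsigned cbItem, unsigned cItems)
 *     {
 *         // store cItems copies of the low cbItem bytes of u32Item at GCPhysAddr
 *         return VINF_SUCCESS;
 *     }
 */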
1267
1268
1269/**
1270 * [REP] LODSB
1271 * [REP] LODSW
1272 * [REP] LODSD
1273 *
1274 * Restricted implementation.
1275 *
1276 *
1277 * @returns VBox status code.
1278 *
1279 * @param pVM The cross context VM structure.
1280 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1281 * @param pRegFrame Trap register frame.
1282 * @param GCPhysFault The GC physical address corresponding to pvFault.
1283 * @param pCpu Disassembler CPU state.
1284 * @param pRange Pointer to the MMIO range.
1285 */
1286static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1287 PIOMMMIORANGE pRange)
1288{
1289 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1290
1291 /*
1292 * We do not support segment prefixes or REP*.
1293 */
1294 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
1295 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1296
1297 /*
1298 * Get data size.
1299 */
1300 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
1301 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1302 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1303
1304 /*
1305 * Perform read.
1306 */
1307 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb));
1308 if (rc == VINF_SUCCESS)
1309 {
1310 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1311 pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
1312 | (pRegFrame->rsi & ~fAddrMask);
1313 }
1314
1315 /*
1316 * Work statistics and return.
1317 */
1318 if (rc == VINF_SUCCESS)
1319 iomMMIOStatLength(pVM, cb);
1320 return rc;
1321}
1322
1323
1324/**
1325 * CMP [MMIO], reg|imm
1326 * CMP reg|imm, [MMIO]
1327 *
1328 * Restricted implementation.
1329 *
1330 *
1331 * @returns VBox status code.
1332 *
1333 * @param pVM The cross context VM structure.
1334 * @param pRegFrame Trap register frame.
1335 * @param GCPhysFault The GC physical address corresponding to pvFault.
1336 * @param pCpu Disassembler CPU state.
1337 * @param pRange Pointer to the MMIO range.
1338 */
1339static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1340 PIOMMMIORANGE pRange)
1341{
1342 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1343
1344 /*
1345 * Get the operands.
1346 */
1347 unsigned cb = 0;
1348 uint64_t uData1 = 0;
1349 uint64_t uData2 = 0;
1350 int rc;
1351 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1352 /* cmp reg, [MMIO]. */
1353 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1354 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1355 /* cmp [MMIO], reg|imm. */
1356 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1357 else
1358 {
1359 AssertMsgFailed(("Disassembler CMP problem.\n"));
1360 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1361 }
1362
1363 if (rc == VINF_SUCCESS)
1364 {
1365#if HC_ARCH_BITS == 32
1366 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1367 if (cb > 4)
1368 return VINF_IOM_R3_MMIO_READ_WRITE;
1369#endif
1370 /* Emulate CMP and update guest flags. */
1371 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
1372 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1373 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1374 iomMMIOStatLength(pVM, cb);
1375 }
1376
1377 return rc;
1378}
1379
1380
1381/**
1382 * AND [MMIO], reg|imm
1383 * AND reg, [MMIO]
1384 * OR [MMIO], reg|imm
1385 * OR reg, [MMIO]
1386 *
1387 * Restricted implementation.
1388 *
1389 *
1390 * @returns VBox status code.
1391 *
1392 * @param pVM The cross context VM structure.
1393 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1394 * @param pRegFrame Trap register frame.
1395 * @param GCPhysFault The GC physical address corresponding to pvFault.
1396 * @param pCpu Disassembler CPU state.
1397 * @param pRange Pointer to the MMIO range.
1398 * @param pfnEmulate Instruction emulation function.
1399 */
1400static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1401 PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
1402{
1403 unsigned cb = 0;
1404 uint64_t uData1 = 0;
1405 uint64_t uData2 = 0;
1406 bool fAndWrite;
1407 int rc;
1408
1409#ifdef LOG_ENABLED
1410 const char *pszInstr;
1411
1412 if (pCpu->pCurInstr->uOpcode == OP_XOR)
1413 pszInstr = "Xor";
1414 else if (pCpu->pCurInstr->uOpcode == OP_OR)
1415 pszInstr = "Or";
1416 else if (pCpu->pCurInstr->uOpcode == OP_AND)
1417 pszInstr = "And";
1418 else
1419 pszInstr = "OrXorAnd??";
1420#endif
1421
1422 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1423 {
1424#if HC_ARCH_BITS == 32
1425 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1426 if (cb > 4)
1427 return VINF_IOM_R3_MMIO_READ_WRITE;
1428#endif
1429 /* and reg, [MMIO]. */
1430 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1431 fAndWrite = false;
1432 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1433 }
1434 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1435 {
1436#if HC_ARCH_BITS == 32
1437 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1438 if (cb > 4)
1439 return VINF_IOM_R3_MMIO_READ_WRITE;
1440#endif
1441 /* and [MMIO], reg|imm. */
1442 fAndWrite = true;
1443 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
1444 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
1445 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1446 else
1447 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1448 }
1449 else
1450 {
1451 AssertMsgFailed(("Disassembler AND problem.\n"));
1452 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1453 }
1454
1455 if (rc == VINF_SUCCESS)
1456 {
1457 /* Emulate the AND/OR/XOR and update guest flags. */
1458 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
1459
1460 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
1461
1462 if (fAndWrite)
1463 /* Store result to MMIO. */
1464 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1465 else
1466 {
1467 /* Store result to register. */
1468 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
1469 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1470 }
1471 if (rc == VINF_SUCCESS)
1472 {
1473 /* Update guest's eflags and finish. */
1474 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1475 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1476 iomMMIOStatLength(pVM, cb);
1477 }
1478 }
1479
1480 return rc;
1481}
1482
1483
1484/**
1485 * TEST [MMIO], reg|imm
1486 * TEST reg, [MMIO]
1487 *
1488 * Restricted implementation.
1489 *
1490 *
1491 * @returns VBox status code.
1492 *
1493 * @param pVM The cross context VM structure.
1494 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1495 * @param pRegFrame Trap register frame.
1496 * @param GCPhysFault The GC physical address corresponding to pvFault.
1497 * @param pCpu Disassembler CPU state.
1498 * @param pRange Pointer to the MMIO range.
1499 */
1500static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1501 PIOMMMIORANGE pRange)
1502{
1503 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1504
1505 unsigned cb = 0;
1506 uint64_t uData1 = 0;
1507 uint64_t uData2 = 0;
1508 int rc;
1509
1510 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1511 {
1512 /* test reg, [MMIO]. */
1513 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1514 }
1515 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1516 {
1517 /* test [MMIO], reg|imm. */
1518 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1519 }
1520 else
1521 {
1522 AssertMsgFailed(("Disassembler TEST problem.\n"));
1523 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1524 }
1525
1526 if (rc == VINF_SUCCESS)
1527 {
1528#if HC_ARCH_BITS == 32
1529 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1530 if (cb > 4)
1531 return VINF_IOM_R3_MMIO_READ_WRITE;
1532#endif
1533
1534 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
1535 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
1536 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1537 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1538 iomMMIOStatLength(pVM, cb);
1539 }
1540
1541 return rc;
1542}
1543
1544
1545/**
1546 * BT [MMIO], reg|imm
1547 *
1548 * Restricted implementation.
1549 *
1550 *
1551 * @returns VBox status code.
1552 *
1553 * @param pVM The cross context VM structure.
1554 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1555 * @param pRegFrame Trap register frame.
1556 * @param GCPhysFault The GC physical address corresponding to pvFault.
1557 * @param pCpu Disassembler CPU state.
1558 * @param pRange Pointer to the MMIO range.
1559 */
1560static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1561 PIOMMMIORANGE pRange)
1562{
1563 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1564
1565 uint64_t uBit = 0;
1566 uint64_t uData = 0;
1567 unsigned cbIgnored;
1568
1569 if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
1570 {
1571 AssertMsgFailed(("Disassembler BT problem.\n"));
1572 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1573 }
1574 /* Only the size of the memory operand matters here. */
1575 unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);
1576
1577 /* bt [MMIO], reg|imm. */
1578 int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData));
1579 if (rc == VINF_SUCCESS)
1580 {
1581 /* Extract the bit from the data read at the faulting address. */
1582 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
1583 iomMMIOStatLength(pVM, cbData);
1584 }
1585
1586 return rc;
1587}
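
/*
 * Note on the carry flag update above (illustrative): assigning
 * (uData >> uBit) to the one-bit field u1CF implicitly masks off all but the
 * low bit, i.e. it is equivalent to:
 *
 *     pRegFrame->eflags.Bits.u1CF = (uData >> uBit) & 1;
 */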
1588
1589/**
1590 * XCHG [MMIO], reg
1591 * XCHG reg, [MMIO]
1592 *
1593 * Restricted implementation.
1594 *
1595 *
1596 * @returns VBox status code.
1597 *
1598 * @param pVM The cross context VM structure.
1599 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1600 * @param pRegFrame Trap register frame.
1601 * @param GCPhysFault The GC physical address corresponding to pvFault.
1602 * @param pCpu Disassembler CPU state.
1603 * @param pRange Pointer to the MMIO range.
1604 */
1605static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1606 PIOMMMIORANGE pRange)
1607{
1608 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
1609 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1610 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
1611 return VINF_IOM_R3_MMIO_READ_WRITE;
1612
1613 int rc;
1614 unsigned cb = 0;
1615 uint64_t uData1 = 0;
1616 uint64_t uData2 = 0;
1617 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1618 {
1619 /* xchg reg, [MMIO]. */
1620 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1621 if (rc == VINF_SUCCESS)
1622 {
1623 /* Store result to MMIO. */
1624 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1625
1626 if (rc == VINF_SUCCESS)
1627 {
1628 /* Store result to register. */
1629 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
1630 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1631 }
1632 else
1633 Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1634 }
1635 else
1636 Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1637 }
1638 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1639 {
1640 /* xchg [MMIO], reg. */
1641 rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
1642 if (rc == VINF_SUCCESS)
1643 {
1644 /* Store result to MMIO. */
1645 rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
1646 if (rc == VINF_SUCCESS)
1647 {
1648 /* Store result to register. */
1649 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
1650 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1651 }
1652 else
1653 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
1654 }
1655 else
1656 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
1657 }
1658 else
1659 {
1660 AssertMsgFailed(("Disassembler XCHG problem.\n"));
1661 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1662 }
1663 return rc;
1664}
1665
1666#endif /* !IEM_USE_IEM_INSTEAD */
1667
1668/**
1669 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
1670 *
1671 * @returns VBox status code (appropriate for GC return).
1672 * @param pVM The cross context VM structure.
1673 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1674 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1675 * any error code (the EPT misconfig hack).
1676 * @param pCtxCore Trap register frame.
1677 * @param GCPhysFault The GC physical address corresponding to pvFault.
1678 * @param pvUser Pointer to the MMIO ring-3 range entry.
1679 */
1680static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
1681 RTGCPHYS GCPhysFault, void *pvUser)
1682{
1683 int rc = IOM_LOCK_SHARED(pVM);
1684#ifndef IN_RING3
1685 if (rc == VERR_SEM_BUSY)
1686 return VINF_IOM_R3_MMIO_READ_WRITE;
1687#endif
1688 AssertRC(rc);
1689
1690 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1691 Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1692
1693 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1694 Assert(pRange);
1695 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1696 iomMmioRetainRange(pRange);
1697#ifndef VBOX_WITH_STATISTICS
1698 IOM_UNLOCK_SHARED(pVM);
1699
1700#else
1701 /*
1702 * Locate the statistics.
1703 */
1704 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
1705 if (!pStats)
1706 {
1707 iomMmioReleaseRange(pVM, pRange);
1708# ifdef IN_RING3
1709 return VERR_NO_MEMORY;
1710# else
1711 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1712 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1713 return VINF_IOM_R3_MMIO_READ_WRITE;
1714# endif
1715 }
1716#endif
1717
1718#ifndef IN_RING3
1719 /*
1720 * Should we defer the request right away? This isn't usually the case, so
1721 * do the simple test first and then try to deal with uErrorCode being N/A.
1722 */
1723 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1724 || !pRange->CTX_SUFF(pfnReadCallback))
1725 && ( uErrorCode == UINT32_MAX
1726 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1727 : uErrorCode & X86_TRAP_PF_RW
1728 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1729 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1730 )
1731 )
1732 )
1733 {
1734 if (uErrorCode & X86_TRAP_PF_RW)
1735 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1736 else
1737 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1738
1739 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1740 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1741 iomMmioReleaseRange(pVM, pRange);
1742 return VINF_IOM_R3_MMIO_READ_WRITE;
1743 }
1744#endif /* !IN_RING3 */
1745
1746 /*
1747 * The range was retained above; now enter the device's critical section.
1748 */
1749 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1750 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1751 if (rc != VINF_SUCCESS)
1752 {
1753 iomMmioReleaseRange(pVM, pRange);
1754 return rc;
1755 }
1756
1757#ifdef IEM_USE_IEM_INSTEAD
1758
1759 /*
1760 * Let IEM call us back via iomMmioHandler.
1761 */
1762 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
1763
1764 NOREF(pCtxCore); NOREF(GCPhysFault);
1765 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1766 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1767 iomMmioReleaseRange(pVM, pRange);
1768 if (RT_SUCCESS(rcStrict))
1769 return rcStrict;
1770 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1771 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1772 {
1773 Log(("IOM: Hit unsupported IEM feature!\n"));
1774 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
1775 }
1776 return rcStrict;
1777
1778#else
1779
1780 /*
1781 * Disassemble the instruction and interpret it.
1782 */
1783 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1784 unsigned cbOp;
1785 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1786 if (RT_FAILURE(rc))
1787 {
1788 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1789 iomMmioReleaseRange(pVM, pRange);
1790 return rc;
1791 }
1792 switch (pDis->pCurInstr->uOpcode)
1793 {
1794 case OP_MOV:
1795 case OP_MOVZX:
1796 case OP_MOVSX:
1797 {
1798 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1799 AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
1800 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1801 ? uErrorCode & X86_TRAP_PF_RW
1802 : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
1803 rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1804 else
1805 rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1806 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1807 break;
1808 }
1809
1810
1811# ifdef IOM_WITH_MOVS_SUPPORT
1812 case OP_MOVSB:
1813 case OP_MOVSWD:
1814 {
1815 if (uErrorCode == UINT32_MAX)
1816 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1817 else
1818 {
1819 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1820 PSTAMPROFILE pStat = NULL;
1821 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1822 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1823 }
1824 break;
1825 }
1826# endif
1827
1828 case OP_STOSB:
1829 case OP_STOSWD:
1830 Assert(uErrorCode & X86_TRAP_PF_RW);
1831 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1832 rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1833 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1834 break;
1835
1836 case OP_LODSB:
1837 case OP_LODSWD:
1838 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1839 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1840 rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1841 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1842 break;
1843
1844 case OP_CMP:
1845 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1846 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1847 rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1848 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1849 break;
1850
1851 case OP_AND:
1852 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1853 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1854 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1855 break;
1856
1857 case OP_OR:
1858 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1859 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1860 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1861 break;
1862
1863 case OP_XOR:
1864 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1865 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1866 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1867 break;
1868
1869 case OP_TEST:
1870 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1871 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1872 rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1873 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1874 break;
1875
1876 case OP_BT:
1877 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1878 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1879 rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1880 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1881 break;
1882
1883 case OP_XCHG:
1884 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1885 rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1886 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1887 break;
1888
1889
1890 /*
1891 * The instruction isn't supported. Hand it on to ring-3.
1892 */
1893 default:
1894 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1895 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1896 break;
1897 }
1898
1899 /*
1900 * On success, advance the guest RIP.
1901 */
1902 if (rc == VINF_SUCCESS)
1903 pCtxCore->rip += cbOp;
1904 else
1905 {
1906 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1907# if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1908 switch (rc)
1909 {
1910 case VINF_IOM_R3_MMIO_READ:
1911 case VINF_IOM_R3_MMIO_READ_WRITE:
1912 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1913 break;
1914 case VINF_IOM_R3_MMIO_WRITE:
1915 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1916 break;
1917 }
1918# endif
1919 }
1920
1921 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1922 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1923 iomMmioReleaseRange(pVM, pRange);
1924 return rc;
1925#endif /* !IEM_USE_IEM_INSTEAD */
1926}
1927
1928
1929/**
1930 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
1931 * \#PF access handler callback for MMIO pages.}
1932 *
1933 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
1934 */
1935DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
1936 RTGCPHYS GCPhysFault, void *pvUser)
1937{
1938 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1939 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); NOREF(pvFault);
1940 return iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1941}
1942
1943
1944/**
1945 * Physical access handler for MMIO ranges.
1946 *
1947 * @returns VBox status code (appropriate for GC return).
1948 * @param pVM The cross context VM structure.
1949 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1950 * @param uErrorCode CPU Error code.
1951 * @param pCtxCore Trap register frame.
1952 * @param GCPhysFault The GC physical address.
1953 */
1954VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1955{
1956 /*
1957 * We don't have a range here, so look it up before calling the common function.
1958 */
1959 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
1960#ifndef IN_RING3
1961 if (rc2 == VERR_SEM_BUSY)
1962 return VINF_IOM_R3_MMIO_READ_WRITE;
1963#endif
1964 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
1965 if (RT_UNLIKELY(!pRange))
1966 {
1967 IOM_UNLOCK_SHARED(pVM);
1968 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1969 }
1970 iomMmioRetainRange(pRange);
1971 IOM_UNLOCK_SHARED(pVM);
1972
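 /* Note (editor's addition): iomMmioCommonPfHandler retains the range again on
    its own and releases that reference on every exit path, so the reference
    taken above is paired only with the release below. */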
1973 VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
1974
1975 iomMmioReleaseRange(pVM, pRange);
1976 return VBOXSTRICTRC_VAL(rcStrict);
1977}
1978
1979
1980/**
1981 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
1982 *
1983 * @remarks The @a pvUser argument points to the MMIO range entry.
1984 */
1985PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
1986 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
1987{
1988 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1989 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1990
1991 AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf));
1992 AssertPtr(pRange);
1993 NOREF(pvPhys); NOREF(enmOrigin);
1994
1995 /*
1996 * Validate the range.
1997 */
1998 int rc = IOM_LOCK_SHARED(pVM);
1999#ifndef IN_RING3
2000 if (rc == VERR_SEM_BUSY)
2001 return VINF_IOM_R3_MMIO_READ_WRITE;
2002#endif
2003 AssertRC(rc);
2004 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
2005
2006 /*
2007 * Perform locking.
2008 */
2009 iomMmioRetainRange(pRange);
2010 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2011 IOM_UNLOCK_SHARED(pVM);
2012 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
2013 if (rcStrict == VINF_SUCCESS)
2014 {
2015 /*
2016 * Perform the access.
2017 */
2018 if (enmAccessType == PGMACCESSTYPE_READ)
2019 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2020 else
2021 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2022
2023 /* Check the return code. */
2024#ifdef IN_RING3
2025 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
2026#else
2027 AssertMsg( rcStrict == VINF_SUCCESS
2028 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
2029 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
2030 || rcStrict == VINF_EM_DBG_STOP
2031 || rcStrict == VINF_EM_DBG_EVENT
2032 || rcStrict == VINF_EM_DBG_BREAKPOINT
2033 || rcStrict == VINF_EM_OFF
2034 || rcStrict == VINF_EM_SUSPEND
2035 || rcStrict == VINF_EM_RESET
2036 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
2037 //|| rcStrict == VINF_EM_HALT /* ?? */
2038 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
2039 , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
2040#endif
2041
2042 iomMmioReleaseRange(pVM, pRange);
2043 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2044 }
2045 else
2046 iomMmioReleaseRange(pVM, pRange);
2047 return rcStrict;
2048}
2049
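/*
 * Editor's illustrative sketch, not part of the original source: the shape of
 * a device read callback that iomMMIODoRead dispatches to via
 * pRange->CTX_SUFF(pfnReadCallback). The device state, name and register
 * layout below are hypothetical; the callback signature and the
 * VINF_IOM_MMIO_UNUSED_FF convention match what this file consumes.
 *
 * @code
 * typedef struct DEMOSTATE { uint32_t uStatus; } DEMOSTATE, *PDEMOSTATE;
 *
 * static DECLCALLBACK(int) demoMmioRead(PPDMDEVINS pDevIns, void *pvUser,
 *                                       RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
 * {
 *     PDEMOSTATE pThis = (PDEMOSTATE)pvUser; NOREF(pDevIns);
 *     Assert(cb == 4);                  // demo registers are dword sized
 *     switch (GCPhysAddr & 0xfff)       // hypothetical register map
 *     {
 *         case 0x00: *(uint32_t *)pv = pThis->uStatus; return VINF_SUCCESS;
 *         default:   return VINF_IOM_MMIO_UNUSED_FF;   // reads as all-ones
 *     }
 * }
 * @endcode
 */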
2050
2051#ifdef IN_RING3 /* Only used by REM. */
2052
2053/**
2054 * Reads a MMIO register.
2055 *
2056 * @returns VBox status code.
2057 *
2058 * @param pVM The cross context VM structure.
2059 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2060 * @param GCPhys The physical address to read.
2061 * @param pu32Value Where to store the value read.
2062 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
2063 */
2064VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
2065{
2066 /* Take the IOM lock before performing any MMIO. */
2067 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2068#ifndef IN_RING3
2069 if (rc == VERR_SEM_BUSY)
2070 return VINF_IOM_R3_MMIO_READ;
2071#endif
2072 AssertRC(VBOXSTRICTRC_VAL(rc));
2073#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2074 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
2075#endif
2076
2077 /*
2078 * Lookup the current context range node and statistics.
2079 */
2080 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2081 if (!pRange)
2082 {
2083 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2084 IOM_UNLOCK_SHARED(pVM);
2085 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2086 }
2087 iomMmioRetainRange(pRange);
2088#ifndef VBOX_WITH_STATISTICS
2089 IOM_UNLOCK_SHARED(pVM);
2090
2091#else /* VBOX_WITH_STATISTICS */
2092 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2093 if (!pStats)
2094 {
2095 iomMmioReleaseRange(pVM, pRange);
2096# ifdef IN_RING3
2097 return VERR_NO_MEMORY;
2098# else
2099 return VINF_IOM_R3_MMIO_READ;
2100# endif
2101 }
2102 STAM_COUNTER_INC(&pStats->Accesses);
2103#endif /* VBOX_WITH_STATISTICS */
2104
2105 if (pRange->CTX_SUFF(pfnReadCallback))
2106 {
2107 /*
2108 * Perform locking.
2109 */
2110 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2111 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
2112 if (rc != VINF_SUCCESS)
2113 {
2114 iomMmioReleaseRange(pVM, pRange);
2115 return rc;
2116 }
2117
2118 /*
2119 * Perform the read and deal with the result.
2120 */
2121 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
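 /* Editor's note: aligned dword and qword accesses, and any access to a range
    in IOMMMIO_FLAGS_READ_PASSTHRU mode, go straight to the device callback;
    all other sizes/alignments are legalized by iomMMIODoComplicatedRead
    according to the range's IOMMMIO_FLAGS_READ_* mode. */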
2122 if ( (cbValue == 4 && !(GCPhys & 3))
2123 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
2124 || (cbValue == 8 && !(GCPhys & 7)) )
2125 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
2126 pu32Value, (unsigned)cbValue);
2127 else
2128 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
2129 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2130 switch (VBOXSTRICTRC_VAL(rc))
2131 {
2132 case VINF_SUCCESS:
2133 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2134 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2135 iomMmioReleaseRange(pVM, pRange);
2136 return rc;
2137#ifndef IN_RING3
2138 case VINF_IOM_R3_MMIO_READ:
2139 case VINF_IOM_R3_MMIO_READ_WRITE:
2140 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2141#endif
2142 default:
2143 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2144 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2145 iomMmioReleaseRange(pVM, pRange);
2146 return rc;
2147
2148 case VINF_IOM_MMIO_UNUSED_00:
2149 iomMMIODoRead00s(pu32Value, cbValue);
2150 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2151 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2152 iomMmioReleaseRange(pVM, pRange);
2153 return VINF_SUCCESS;
2154
2155 case VINF_IOM_MMIO_UNUSED_FF:
2156 iomMMIODoReadFFs(pu32Value, cbValue);
2157 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2158 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2159 iomMmioReleaseRange(pVM, pRange);
2160 return VINF_SUCCESS;
2161 }
2162 /* not reached */
2163 }
2164#ifndef IN_RING3
2165 if (pRange->pfnReadCallbackR3)
2166 {
2167 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2168 iomMmioReleaseRange(pVM, pRange);
2169 return VINF_IOM_R3_MMIO_READ;
2170 }
2171#endif
2172
2173 /*
2174 * Unassigned memory - this is actually not supposed to happen...
2175 */
2176 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
2177 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2178 iomMMIODoReadFFs(pu32Value, cbValue);
2179 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2180 iomMmioReleaseRange(pVM, pRange);
2181 return VINF_SUCCESS;
2182}
2183
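/*
 * Editor's hedged usage sketch, not part of the original source (GCPhysReg is
 * a hypothetical register address; the same pattern applies to IOMMMIOWrite
 * below): in ring-3 the UNUSED_00/UNUSED_FF statuses are already folded into
 * the output value above, so only VINF_SUCCESS or an error comes back.
 *
 * @code
 * uint32_t u32Reg = 0;
 * VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, GCPhysReg, &u32Reg, sizeof(u32Reg));
 * if (rcStrict == VINF_SUCCESS)
 *     Log(("demo: MMIO register %RGp = %#RX32\n", GCPhysReg, u32Reg));
 * @endcode
 */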
2184
2185/**
2186 * Writes to a MMIO register.
2187 *
2188 * @returns VBox status code.
2189 *
2190 * @param pVM The cross context VM structure.
2191 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2192 * @param GCPhys The physical address to write to.
2193 * @param u32Value The value to write.
2194 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
2195 */
2196VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
2197{
2198 /* Take the IOM lock before performing any MMIO. */
2199 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2200#ifndef IN_RING3
2201 if (rc == VERR_SEM_BUSY)
2202 return VINF_IOM_R3_MMIO_WRITE;
2203#endif
2204 AssertRC(VBOXSTRICTRC_VAL(rc));
2205#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2206 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
2207#endif
2208
2209 /*
2210 * Lookup the current context range node.
2211 */
2212 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2213 if (!pRange)
2214 {
2215 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2216 IOM_UNLOCK_SHARED(pVM);
2217 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2218 }
2219 iomMmioRetainRange(pRange);
2220#ifndef VBOX_WITH_STATISTICS
2221 IOM_UNLOCK_SHARED(pVM);
2222
2223#else /* VBOX_WITH_STATISTICS */
2224 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2225 if (!pStats)
2226 {
2227 iomMmioReleaseRange(pVM, pRange);
2228# ifdef IN_RING3
2229 return VERR_NO_MEMORY;
2230# else
2231 return VINF_IOM_R3_MMIO_WRITE;
2232# endif
2233 }
2234 STAM_COUNTER_INC(&pStats->Accesses);
2235#endif /* VBOX_WITH_STATISTICS */
2236
2237 if (pRange->CTX_SUFF(pfnWriteCallback))
2238 {
2239 /*
2240 * Perform locking.
2241 */
2242 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2243 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2244 if (rc != VINF_SUCCESS)
2245 {
2246 iomMmioReleaseRange(pVM, pRange);
2247 return rc;
2248 }
2249
2250 /*
2251 * Perform the write.
2252 */
2253 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
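 /* Editor's note: same fast-path criteria as the read side above - aligned
    dword/qword accesses and IOMMMIO_FLAGS_WRITE_PASSTHRU ranges go straight
    to the device; everything else is split up by iomMMIODoComplicatedWrite. */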
2254 if ( (cbValue == 4 && !(GCPhys & 3))
2255 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2256 || (cbValue == 8 && !(GCPhys & 7)) )
2257 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2258 GCPhys, &u32Value, (unsigned)cbValue);
2259 else
2260 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2261 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2262#ifndef IN_RING3
2263 if ( rc == VINF_IOM_R3_MMIO_WRITE
2264 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2265 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2266#endif
2267 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2268 iomMmioReleaseRange(pVM, pRange);
2269 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2270 return rc;
2271 }
2272#ifndef IN_RING3
2273 if (pRange->pfnWriteCallbackR3)
2274 {
2275 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2276 iomMmioReleaseRange(pVM, pRange);
2277 return VINF_IOM_R3_MMIO_WRITE;
2278 }
2279#endif
2280
2281 /*
2282 * No write handler, nothing to do.
2283 */
2284 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2285 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2286 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2287 iomMmioReleaseRange(pVM, pRange);
2288 return VINF_SUCCESS;
2289}
2290
2291#endif /* IN_RING3 - only used by REM. */
2292#ifndef IEM_USE_IEM_INSTEAD
2293
2294/**
2295 * [REP*] INSB/INSW/INSD
2296 * ES:EDI,DX[,ECX]
2297 *
2298 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2299 *
2300 * @returns Strict VBox status code. Informational status codes other than the one documented
2301 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2302 * @retval VINF_SUCCESS Success.
2303 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2304 * status code must be passed on to EM.
2305 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2306 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2307 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2308 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2309 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2310 *
2311 * @param pVM The cross context VM structure.
2312 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2313 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2314 * @param uPort IO Port
2315 * @param uPrefix IO instruction prefix
2316 * @param enmAddrMode The address mode.
2317 * @param cbTransfer Size of transfer unit
2318 */
2319VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2320 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2321{
2322 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2323
2324 /*
2325 * We do not support REPNE or a decrementing destination pointer. Segment
2326 * prefixes are deliberately ignored, as per the instruction specification.
2327 */
2328 if ( (uPrefix & DISPREFIX_REPNE)
2329 || pRegFrame->eflags.Bits.u1DF)
2330 return VINF_EM_RAW_EMULATE_INSTR;
2331
2332 /*
2333 * Get bytes/words/dwords count to transfer.
2334 */
2335 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
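 /* Editor's note: iomDisModeToMask yields 0xffff, 0xffffffff or UINT64_MAX for
    16/32/64-bit addressing. It is applied below so that only the address-size
    portion of rcx/rdi is consumed and updated; e.g. with a 16-bit address size
    DI wraps at 64K while the upper bits of RDI are preserved. */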
2336 RTGCUINTREG cTransfers = 1;
2337 if (uPrefix & DISPREFIX_REP)
2338 {
2339#ifndef IN_RC
2340 if ( CPUMIsGuestIn64BitCode(pVCpu)
2341 && pRegFrame->rcx >= _4G)
2342 return VINF_EM_RAW_EMULATE_INSTR;
2343#endif
2344 cTransfers = pRegFrame->rcx & fAddrMask;
2345 if (!cTransfers)
2346 return VINF_SUCCESS;
2347 }
2348
2349 /* Convert destination address es:edi. */
2350 RTGCPTR GCPtrDst;
2351 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2352 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2353 &GCPtrDst);
2354 if (RT_FAILURE(rc2))
2355 {
2356 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2357 return VINF_EM_RAW_EMULATE_INSTR;
2358 }
2359
2360 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2361 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2362 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2363 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2364 if (rc2 != VINF_SUCCESS)
2365 {
2366 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2367 return VINF_EM_RAW_EMULATE_INSTR;
2368 }
2369
2370 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2371 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2372 if (cTransfers > 1)
2373 {
2374 /*
2375 * Work the string page by page, letting the device handle as much
2376 * as it likes via the string I/O interface.
2377 */
2378 for (;;)
2379 {
2380 PGMPAGEMAPLOCK Lock;
2381 void *pvDst;
2382 rc2 = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2383 if (RT_SUCCESS(rc2))
2384 {
2385 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK)) / cbTransfer;
2386 if (cMaxThisTime > cTransfers)
2387 cMaxThisTime = cTransfers;
2388 if (!cMaxThisTime)
2389 break;
2390 uint32_t cThisTime = cMaxThisTime;
2391
2392 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, pvDst, &cThisTime, cbTransfer);
2393 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2394 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2395
2396 uint32_t const cActual = cMaxThisTime - cThisTime;
2397 if (cActual)
2398 { /* Must dirty the page. */
2399 uint8_t b = *(uint8_t *)pvDst;
2400 iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &b, 1);
2401 }
2402
2403 PGMPhysReleasePageMappingLock(pVM, &Lock);
2404
2405 uint32_t const cbActual = cActual * cbTransfer;
2406 cTransfers -= cActual;
2407 pRegFrame->rdi = ((pRegFrame->rdi + cbActual) & fAddrMask)
2408 | (pRegFrame->rdi & ~fAddrMask);
2409 GCPtrDst += cbActual;
2410
2411 if ( cThisTime
2412 || !cTransfers
2413 || rcStrict != VINF_SUCCESS
2414 || (GCPtrDst & PAGE_OFFSET_MASK))
2415 break;
2416 }
2417 else
2418 {
2419 Log(("IOMInterpretINSEx: PGMPhysGCPtr2CCPtr %#RGv -> %Rrc\n", GCPtrDst, rc2));
2420 break;
2421 }
2422 }
2423 }
2424
2425 /*
2426 * Single transfer / unmapped memory fallback.
2427 */
2428#ifdef IN_RC
2429 MMGCRamRegisterTrapHandler(pVM);
2430#endif
2431 while (cTransfers && rcStrict == VINF_SUCCESS)
2432 {
2433 uint32_t u32Value;
2434 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2435 if (!IOM_SUCCESS(rcStrict))
2436 break;
2437 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2438 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2439 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2440 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2441 | (pRegFrame->rdi & ~fAddrMask);
2442 cTransfers--;
2443 }
2444#ifdef IN_RC
2445 MMGCRamDeregisterTrapHandler(pVM);
2446#endif
2447
2448 /* Update rcx on exit. */
2449 if (uPrefix & DISPREFIX_REP)
2450 pRegFrame->rcx = (cTransfers & fAddrMask)
2451 | (pRegFrame->rcx & ~fAddrMask);
2452
2453 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2454 return rcStrict;
2455}
2456
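/*
 * Editor's hedged sketch of a call site, not part of the original source
 * (cbInstr stands in for the instruction length from the disassembler):
 * handing a decoded "rep insb" to the interpreter above.
 *
 * @code
 * VBOXSTRICTRC rcStrict = IOMInterpretINSEx(pVM, pVCpu, pRegFrame,
 *                                           pRegFrame->edx & 0xffff,
 *                                           DISPREFIX_REP, DISCPUMODE_32BIT,
 *                                           1); // cbTransfer = 1: byte units
 * if (IOM_SUCCESS(rcStrict))
 *     pRegFrame->rip += cbInstr; // advance past the INS on (partial) success
 * @endcode
 */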
2457
2458/**
2459 * [REP*] OUTSB/OUTSW/OUTSD
2460 * DS:ESI,DX[,ECX]
2461 *
2462 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2463 *
2464 * @returns Strict VBox status code. Informational status codes other than the one documented
2465 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2466 * @retval VINF_SUCCESS Success.
2467 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2468 * status code must be passed on to EM.
2469 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2470 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2471 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2472 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2473 *
2474 * @param pVM The cross context VM structure.
2475 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2476 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2477 * @param uPort IO Port
2478 * @param uPrefix IO instruction prefix
2479 * @param enmAddrMode The address mode.
2480 * @param cbTransfer Size of transfer unit
2481 *
2482 * @remarks This API will probably be replaced by IEM before long, so there is
2483 * little point in optimizing or fixing it too much here.
2484 */
2485VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2486 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2487{
2488 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2489
2490 /*
2491 * We do not support segment prefixes, REPNE, or a decrementing
2492 * source pointer.
2493 */
2494 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2495 || pRegFrame->eflags.Bits.u1DF)
2496 return VINF_EM_RAW_EMULATE_INSTR;
2497
2498 /*
2499 * Get bytes/words/dwords count to transfer.
2500 */
2501 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2502 RTGCUINTREG cTransfers = 1;
2503 if (uPrefix & DISPREFIX_REP)
2504 {
2505#ifndef IN_RC
2506 if ( CPUMIsGuestIn64BitCode(pVCpu)
2507 && pRegFrame->rcx >= _4G)
2508 return VINF_EM_RAW_EMULATE_INSTR;
2509#endif
2510 cTransfers = pRegFrame->rcx & fAddrMask;
2511 if (!cTransfers)
2512 return VINF_SUCCESS;
2513 }
2514
2515 /* Convert source address ds:esi. */
2516 RTGCPTR GCPtrSrc;
2517 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2518 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2519 &GCPtrSrc);
2520 if (RT_FAILURE(rc2))
2521 {
2522 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2523 return VINF_EM_RAW_EMULATE_INSTR;
2524 }
2525
2526 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2527 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2528 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2529 (cpl == 3) ? X86_PTE_US : 0);
2530 if (rc2 != VINF_SUCCESS)
2531 {
2532 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2533 return VINF_EM_RAW_EMULATE_INSTR;
2534 }
2535
2536 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2537 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2538 if (cTransfers > 1)
2539 {
2540 /*
2541 * Work the string page by page, letting the device handle as much
2542 * as it likes via the string I/O interface.
2543 */
2544 for (;;)
2545 {
2546 PGMPAGEMAPLOCK Lock;
2547 void const *pvSrc;
2548 rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2549 if (RT_SUCCESS(rc2))
2550 {
2551 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK)) / cbTransfer;
2552 if (cMaxThisTime > cTransfers)
2553 cMaxThisTime = cTransfers;
2554 if (!cMaxThisTime)
2555 break;
2556 uint32_t cThisTime = cMaxThisTime;
2557
2558 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, pvSrc, &cThisTime, cbTransfer);
2559 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2560 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2561
2562 PGMPhysReleasePageMappingLock(pVM, &Lock);
2563
2564 uint32_t const cActual = cMaxThisTime - cThisTime;
2565 uint32_t const cbActual = cActual * cbTransfer;
2566 cTransfers -= cActual;
2567 pRegFrame->rsi = ((pRegFrame->rsi + cbActual) & fAddrMask)
2568 | (pRegFrame->rsi & ~fAddrMask);
2569 GCPtrSrc += cbActual;
2570
2571 if ( cThisTime
2572 || !cTransfers
2573 || rcStrict != VINF_SUCCESS
2574 || (GCPtrSrc & PAGE_OFFSET_MASK))
2575 break;
2576 }
2577 else
2578 {
2579 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtrReadOnly %#RGv -> %Rrc\n", GCPtrSrc, rc2));
2580 break;
2581 }
2582 }
2583 }
2584
2585 /*
2586 * Single transfer / unmapped memory fallback.
2587 */
2588#ifdef IN_RC
2589 MMGCRamRegisterTrapHandler(pVM);
2590#endif
2591
2592 while (cTransfers && rcStrict == VINF_SUCCESS)
2593 {
2594 uint32_t u32Value = 0;
2595 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2596 if (rcStrict != VINF_SUCCESS)
2597 break;
2598 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2599 if (!IOM_SUCCESS(rcStrict))
2600 break;
2601 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
2602 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2603 | (pRegFrame->rsi & ~fAddrMask);
2604 cTransfers--;
2605 }
2606
2607#ifdef IN_RC
2608 MMGCRamDeregisterTrapHandler(pVM);
2609#endif
2610
2611 /* Update rcx on exit. */
2612 if (uPrefix & DISPREFIX_REP)
2613 pRegFrame->rcx = (cTransfers & fAddrMask)
2614 | (pRegFrame->rcx & ~fAddrMask);
2615
2616 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2617 return rcStrict;
2618}
2619
2620#endif /* !IEM_USE_IEM_INSTEAD */
2621
2622
2623#ifndef IN_RC
2624
2625/**
2626 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2627 *
2628 * (This is a special optimization used by the VGA device.)
2629 *
2630 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2631 * remapping is made.
2632 *
2633 * @param pVM The cross context VM structure.
2634 * @param GCPhys The address of the MMIO page to be changed.
2635 * @param GCPhysRemapped The address of the MMIO2 page.
2636 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2637 * for the time being.
2638 */
2639VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2640{
2641# ifndef IEM_VERIFICATION_MODE_FULL
2642 /* Currently only called from the VGA device during MMIO. */
2643 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2644 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2645 PVMCPU pVCpu = VMMGetCpu(pVM);
2646
2647 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2648 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2649 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2650 && !HMIsNestedPagingActive(pVM)))
2651 return VINF_SUCCESS; /* ignore */
2652
2653 int rc = IOM_LOCK_SHARED(pVM);
2654 if (RT_FAILURE(rc))
2655 return VINF_SUCCESS; /* better luck the next time around */
2656
2657 /*
2658 * Lookup the context range node the page belongs to.
2659 */
2660 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2661 AssertMsgReturn(pRange,
2662 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2663
2664 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2665 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2666
2667 /*
2668 * Do the aliasing; page align the addresses since PGM is picky.
2669 */
2670 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2671 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2672
2673 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2674
2675 IOM_UNLOCK_SHARED(pVM);
2676 AssertRCReturn(rc, rc);
2677
2678 /*
2679 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2680 * can simply prefetch it.
2681 *
2682 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2683 */
2684# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2685# ifdef VBOX_STRICT
2686 uint64_t fFlags;
2687 RTHCPHYS HCPhys;
2688 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2689 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2690# endif
2691# endif
2692 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2693 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2694# endif /* !IEM_VERIFICATION_MODE_FULL */
2695 return VINF_SUCCESS;
2696}
2697
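/*
 * Editor's hedged sketch, not part of the original source (the two addresses
 * are hypothetical): a VGA-like device aliasing one of its MMIO2 backing
 * pages over the matching guest-physical MMIO page for direct access.
 *
 * @code
 * int rc = IOMMMIOMapMMIO2Page(pVM, GCPhysMmioPage, GCPhysMmio2Page,
 *                              X86_PTE_RW | X86_PTE_P);
 * AssertRC(rc); // may be VINF_SUCCESS even when no remapping was performed
 * @endcode
 */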
2698
2699# ifndef IEM_VERIFICATION_MODE_FULL
2700/**
2701 * Mapping a HC page in place of an MMIO page for direct access.
2702 *
2703 * (This is a special optimization used by the APIC in the VT-x case.)
2704 *
2705 * @returns VBox status code.
2706 *
2707 * @param pVM The cross context VM structure.
2708 * @param pVCpu The cross context virtual CPU structure.
2709 * @param GCPhys The address of the MMIO page to be changed.
2710 * @param HCPhys The address of the host physical page.
2711 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2712 * for the time being.
2713 */
2714VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2715{
2716 /* Currently only called from VT-x code during a page fault. */
2717 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2718
2719 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2720 Assert(HMIsEnabled(pVM));
2721
2722 /*
2723 * Lookup the context range node the page belongs to.
2724 */
2725# ifdef VBOX_STRICT
2726 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2727 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2728 AssertMsgReturn(pRange,
2729 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2730 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2731 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2732# endif
2733
2734 /*
2735 * Do the aliasing; page align the addresses since PGM is picky.
2736 */
2737 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2738 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2739
2740 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2741 AssertRCReturn(rc, rc);
2742
2743 /*
2744 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2745 * can simply prefetch it.
2746 *
2747 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2748 */
2749 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2750 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2751 return VINF_SUCCESS;
2752}
2753# endif /* !IEM_VERIFICATION_MODE_FULL */
2754
2755
2756/**
2757 * Reset a previously modified MMIO region; restore the access flags.
2758 *
2759 * @returns VBox status code.
2760 *
2761 * @param pVM The cross context VM structure.
2762 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2763 */
2764VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2765{
2766 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2767
2768 PVMCPU pVCpu = VMMGetCpu(pVM);
2769
2770 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2771 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2772 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2773 && !HMIsNestedPagingActive(pVM)))
2774 return VINF_SUCCESS; /* ignore */
2775
2776 /*
2777 * Lookup the context range node the page belongs to.
2778 */
2779# ifdef VBOX_STRICT
2780 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2781 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2782 AssertMsgReturn(pRange,
2783 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2784 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2785 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2786# endif
2787
2788 /*
2789 * Call PGM to do the work.
2790 *
2791 * After the call, all the pages should be non-present... unless there is
2792 * a page pool flush pending (unlikely).
2793 */
2794 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2795 AssertRC(rc);
2796
2797# ifdef VBOX_STRICT
2798 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2799 {
2800 uint32_t cb = pRange->cb;
2801 GCPhys = pRange->GCPhys;
2802 while (cb)
2803 {
2804 uint64_t fFlags;
2805 RTHCPHYS HCPhys;
2806 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2807 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2808 cb -= PAGE_SIZE;
2809 GCPhys += PAGE_SIZE;
2810 }
2811 }
2812# endif
2813 return rc;
2814}
2815
2816#endif /* !IN_RC */
2817