VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@45305

Last change on this file since 45305 was 45305, checked in by vboxsync, 12 years ago

IOM: Adding pVCpu to a lot of calls and moving the lookup caches from VM to VMCPU.

1/* $Id: IOMAllMMIO.cpp 45305 2013-04-03 11:15:02Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
32# include <VBox/vmm/iem.h>
33#endif
34#include "IOMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/hm.h>
38#include "IOMInline.h"
39
40#include <VBox/dis.h>
41#include <VBox/disopcode.h>
42#include <VBox/vmm/pdmdev.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <iprt/assert.h>
46#include <VBox/log.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50
51/*******************************************************************************
52* Global Variables *
53*******************************************************************************/
54
55/**
56 * Array for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
57 */
58static const unsigned g_aSize2Shift[] =
59{
60 ~0U, /* 0 - invalid */
61 0, /* *1 == 2^0 */
62 1, /* *2 == 2^1 */
63 ~0U, /* 3 - invalid */
64 2, /* *4 == 2^2 */
65 ~0U, /* 5 - invalid */
66 ~0U, /* 6 - invalid */
67 ~0U, /* 7 - invalid */
68 3 /* *8 == 2^3 */
69};
70
71/**
72 * Macro for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
73 */
74#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
75
76
77/**
78 * Deals with complicated MMIO writes.
79 *
80 * Complicated means unaligned or non-dword/qword sized accesses depending on
81 * the MMIO region's access mode flags.
82 *
83 * @returns Strict VBox status code. Any EM scheduling status code,
84 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
85 * VINF_IOM_R3_MMIO_READ may be returned.
86 *
87 * @param pVM Pointer to the VM.
88 * @param pRange The range to write to.
89 * @param GCPhys The physical address to start writing.
90 * @param pvValue The value to write.
91 * @param cbValue The size of the value to write.
92 */
93static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
94{
95 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
96 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
97 VERR_IOM_MMIO_IPE_1);
98 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
99 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
100 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
101 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;
102
103 /*
104 * Do debug stop if requested.
105 */
106 int rc = VINF_SUCCESS; NOREF(pVM);
107#ifdef VBOX_STRICT
108 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
109 {
110# ifdef IN_RING3
111 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
112 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
113 if (rc == VERR_DBGF_NOT_ATTACHED)
114 rc = VINF_SUCCESS;
115# else
116 return VINF_IOM_R3_MMIO_WRITE;
117# endif
118 }
119#endif
120
121 /*
122 * Check if we should ignore the write.
123 */
124 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
125 {
126 Assert(cbValue != 4 || (GCPhys & 3));
127 return VINF_SUCCESS;
128 }
129 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
130 {
131 Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
132 return VINF_SUCCESS;
133 }
134
135 /*
136 * Split and conquer.
137 */
138 for (;;)
139 {
140 unsigned const offAccess = GCPhys & 3;
141 unsigned cbThisPart = 4 - offAccess;
142 if (cbThisPart > cbValue)
143 cbThisPart = cbValue;
144
145 /*
146 * Get the missing bits (if any).
147 */
148 uint32_t u32MissingValue = 0;
149 if (fReadMissing && cbThisPart != 4)
150 {
151 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
152 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
153 switch (rc2)
154 {
155 case VINF_SUCCESS:
156 break;
157 case VINF_IOM_MMIO_UNUSED_FF:
158 u32MissingValue = UINT32_C(0xffffffff);
159 break;
160 case VINF_IOM_MMIO_UNUSED_00:
161 u32MissingValue = 0;
162 break;
163 case VINF_IOM_R3_MMIO_READ:
164 case VINF_IOM_R3_MMIO_READ_WRITE:
165 case VINF_IOM_R3_MMIO_WRITE:
166 /** @todo What if we've split a transfer and already read
167 * something? Since writes generally have side effects we
168 * could be kind of screwed here...
169 *
170 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
171 * to REM for MMIO accesses (like it may currently do). */
172
173 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
174 return rc2;
175 default:
176 if (RT_FAILURE(rc2))
177 {
178 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
179 return rc2;
180 }
181 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
182 if (rc == VINF_SUCCESS || rc2 < rc)
183 rc = rc2;
184 break;
185 }
186 }
187
188 /*
189 * Merge missing and given bits.
190 */
191 uint32_t u32GivenMask;
192 uint32_t u32GivenValue;
193 switch (cbThisPart)
194 {
195 case 1:
196 u32GivenValue = *(uint8_t const *)pvValue;
197 u32GivenMask = UINT32_C(0x000000ff);
198 break;
199 case 2:
200 u32GivenValue = *(uint16_t const *)pvValue;
201 u32GivenMask = UINT32_C(0x0000ffff);
202 break;
203 case 3:
204 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
205 ((uint8_t const *)pvValue)[2], 0);
206 u32GivenMask = UINT32_C(0x00ffffff);
207 break;
208 case 4:
209 u32GivenValue = *(uint32_t const *)pvValue;
210 u32GivenMask = UINT32_C(0xffffffff);
211 break;
212 default:
213 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
214 }
215 if (offAccess)
216 {
217 u32GivenValue <<= offAccess * 8;
218 u32GivenMask <<= offAccess * 8;
219 }
220
221 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
222 | (u32GivenValue & u32GivenMask);
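 /*
  * Worked example: a 2-byte write of 0x1122 at offset 1 of a dword that
  * currently reads back as 0xaabbccdd (the fReadMissing case):
  *     u32GivenValue = 0x00001122 << 8 = 0x00112200
  *     u32GivenMask  = 0x0000ffff << 8 = 0x00ffff00
  *     u32Value      = (0xaabbccdd & ~0x00ffff00) | 0x00112200 = 0xaa1122dd
  */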
223
224 /*
225 * Do DWORD write to the device.
226 */
227 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
228 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
229 switch (rc2)
230 {
231 case VINF_SUCCESS:
232 break;
233 case VINF_IOM_R3_MMIO_READ:
234 case VINF_IOM_R3_MMIO_READ_WRITE:
235 case VINF_IOM_R3_MMIO_WRITE:
236 /** @todo What if we've split a transfer and already read
237 * something? Since reads can have side effects we could be
238 * kind of screwed here...
239 *
240 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
241 * to REM for MMIO accesses (like it may currently do). */
242 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
243 return rc2;
244 default:
245 if (RT_FAILURE(rc2))
246 {
247 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
248 return rc2;
249 }
250 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
251 if (rc == VINF_SUCCESS || rc2 < rc)
252 rc = rc2;
253 break;
254 }
255
256 /*
257 * Advance.
258 */
259 cbValue -= cbThisPart;
260 if (!cbValue)
261 break;
262 GCPhys += cbThisPart;
263 pvValue = (uint8_t const *)pvValue + cbThisPart;
264 }
265
266 return rc;
267}
268
269
270
271
272/**
273 * Wrapper which does the write and updates range statistics when such are enabled.
274 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
275 */
276static int iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
277{
278#ifdef VBOX_WITH_STATISTICS
279 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
280 Assert(pStats);
281#endif
282
283 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
284 VBOXSTRICTRC rc;
285 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
286 {
287 if ( (cb == 4 && !(GCPhysFault & 3))
288 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
289 || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
290 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
291 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
292 else
293 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
294 }
295 else
296 rc = VINF_SUCCESS;
297 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
298 STAM_COUNTER_INC(&pStats->Accesses);
299 return VBOXSTRICTRC_TODO(rc);
300}
301
302
303/**
304 * Deals with complicated MMIO reads.
305 *
306 * Complicated means unaligned or non-dword/qword sized accesses depending on
307 * the MMIO region's access mode flags.
308 *
309 * @returns Strict VBox status code. Any EM scheduling status code,
310 * VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
311 * VINF_IOM_R3_MMIO_WRITE may be returned.
312 *
313 * @param pVM Pointer to the VM.
314 * @param pRange The range to read from.
315 * @param GCPhys The physical address to start reading.
316 * @param pvValue Where to store the value.
317 * @param cbValue The size of the value to read.
318 */
319static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
320{
321 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
322 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
323 VERR_IOM_MMIO_IPE_1);
324 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
325 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
326
327 /*
328 * Do debug stop if requested.
329 */
330 int rc = VINF_SUCCESS; NOREF(pVM);
331#ifdef VBOX_STRICT
332 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
333 {
334# ifdef IN_RING3
335 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
336 "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
337 if (rc == VERR_DBGF_NOT_ATTACHED)
338 rc = VINF_SUCCESS;
339# else
340 return VINF_IOM_R3_MMIO_READ;
341# endif
342 }
343#endif
344
345 /*
346 * Split and conquer.
347 */
348 for (;;)
349 {
350 /*
351 * Do DWORD read from the device.
352 */
353 uint32_t u32Value;
354 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
355 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
356 switch (rc2)
357 {
358 case VINF_SUCCESS:
359 break;
360 case VINF_IOM_MMIO_UNUSED_FF:
361 u32Value = UINT32_C(0xffffffff);
362 break;
363 case VINF_IOM_MMIO_UNUSED_00:
364 u32Value = 0;
365 break;
366 case VINF_IOM_R3_MMIO_READ:
367 case VINF_IOM_R3_MMIO_READ_WRITE:
368 case VINF_IOM_R3_MMIO_WRITE:
369 /** @todo What if we've split a transfer and already read
370 * something? Since reads can have sideeffects we could be
371 * kind of screwed here... */
372 LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
373 return rc2;
374 default:
375 if (RT_FAILURE(rc2))
376 {
377 Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
378 return rc2;
379 }
380 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
381 if (rc == VINF_SUCCESS || rc2 < rc)
382 rc = rc2;
383 break;
384 }
385 u32Value >>= (GCPhys & 3) * 8;
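 /*
  * Example: a 2-byte read at offset 3 first fetches the containing dword, say
  * 0xaabbccdd, and shifts it down by 24 bits to 0x000000aa. Only
  * cbThisPart == 1 byte of that is stored below; the next loop iteration reads
  * the following dword for the remaining byte.
  */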
386
387 /*
388 * Write what we've read.
389 */
390 unsigned cbThisPart = 4 - (GCPhys & 3);
391 if (cbThisPart > cbValue)
392 cbThisPart = cbValue;
393
394 switch (cbThisPart)
395 {
396 case 1:
397 *(uint8_t *)pvValue = (uint8_t)u32Value;
398 break;
399 case 2:
400 *(uint16_t *)pvValue = (uint16_t)u32Value;
401 break;
402 case 3:
403 ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
404 ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
405 ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
406 break;
407 case 4:
408 *(uint32_t *)pvValue = u32Value;
409 break;
410 }
411
412 /*
413 * Advance.
414 */
415 cbValue -= cbThisPart;
416 if (!cbValue)
417 break;
418 GCPhys += cbThisPart;
419 pvValue = (uint8_t *)pvValue + cbThisPart;
420 }
421
422 return rc;
423}
424
425
426/**
427 * Implements VINF_IOM_MMIO_UNUSED_FF.
428 *
429 * @returns VINF_SUCCESS.
430 * @param pvValue Where to store the 0xff bytes.
431 * @param cbValue How many bytes to read.
432 */
433static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
434{
435 switch (cbValue)
436 {
437 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
438 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
439 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
440 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
441 default:
442 {
443 uint8_t *pb = (uint8_t *)pvValue;
444 while (cbValue--)
445 *pb++ = UINT8_C(0xff);
446 break;
447 }
448 }
449 return VINF_SUCCESS;
450}
451
452
453/**
454 * Implements VINF_IOM_MMIO_UNUSED_00.
455 *
456 * @returns VINF_SUCCESS.
457 * @param pvValue Where to store the zeros.
458 * @param cbValue How many bytes to read.
459 */
460static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
461{
462 switch (cbValue)
463 {
464 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
465 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
466 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
467 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
468 default:
469 {
470 uint8_t *pb = (uint8_t *)pvValue;
471 while (cbValue--)
472 *pb++ = UINT8_C(0x00);
473 break;
474 }
475 }
476 return VINF_SUCCESS;
477}
478
479
480/**
481 * Wrapper which does the read and updates range statistics when such are enabled.
482 */
483DECLINLINE(int) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
484{
485#ifdef VBOX_WITH_STATISTICS
486 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
487 Assert(pStats);
488 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
489#endif
490
491 VBOXSTRICTRC rc;
492 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
493 {
494 if ( ( cbValue == 4
495 && !(GCPhys & 3))
496 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
497 || ( cbValue == 8
498 && !(GCPhys & 7)
499 && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
500 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
501 else
502 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
503 }
504 else
505 rc = VINF_IOM_MMIO_UNUSED_FF;
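 /* No read callback: the range reads as unassigned space, i.e. the
    VINF_IOM_MMIO_UNUSED_FF status is converted to all-0xff bytes below. */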
506 if (rc != VINF_SUCCESS)
507 {
508 switch (VBOXSTRICTRC_VAL(rc))
509 {
510 case VINF_IOM_MMIO_UNUSED_FF: rc = iomMMIODoReadFFs(pvValue, cbValue); break;
511 case VINF_IOM_MMIO_UNUSED_00: rc = iomMMIODoRead00s(pvValue, cbValue); break;
512 }
513 }
514 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
515 STAM_COUNTER_INC(&pStats->Accesses);
516 return VBOXSTRICTRC_VAL(rc);
517}
518
519
520/**
521 * Internal - statistics only.
522 */
523DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
524{
525#ifdef VBOX_WITH_STATISTICS
526 switch (cb)
527 {
528 case 1:
529 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
530 break;
531 case 2:
532 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
533 break;
534 case 4:
535 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
536 break;
537 case 8:
538 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
539 break;
540 default:
541 /* No way. */
542 AssertMsgFailed(("Invalid data length %d\n", cb));
543 break;
544 }
545#else
546 NOREF(pVM); NOREF(cb);
547#endif
548}
549
550
551/**
552 * MOV reg, mem (read)
553 * MOVZX reg, mem (read)
554 * MOVSX reg, mem (read)
555 *
556 * @returns VBox status code.
557 *
558 * @param pVM The virtual machine.
559 * @param pVCpu Pointer to the virtual CPU structure of the caller.
560 * @param pRegFrame Pointer to the CPUMCTXCORE guest register structure.
561 * @param pCpu Disassembler CPU state.
562 * @param pRange Pointer to the MMIO range.
563 * @param GCPhysFault The GC physical address corresponding to pvFault.
564 */
565static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
566 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
567{
568 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
569
570 /*
571 * Get the data size from parameter 2,
572 * and call the handler function to get the data.
573 */
574 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
575 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
576
577 uint64_t u64Data = 0;
578 int rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb);
579 if (rc == VINF_SUCCESS)
580 {
581 /*
582 * Do sign extension for MOVSX.
583 */
584 /** @todo checkup MOVSX implementation! */
585 if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
586 {
587 if (cb == 1)
588 {
589 /* DWORD <- BYTE */
590 int64_t iData = (int8_t)u64Data;
591 u64Data = (uint64_t)iData;
592 }
593 else
594 {
595 /* DWORD <- WORD */
596 int64_t iData = (int16_t)u64Data;
597 u64Data = (uint64_t)iData;
598 }
599 }
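 /* Example: for MOVSX with cb == 1 and u64Data == 0x80 the value stored below
    becomes 0xffffffffffffff80, while MOVZX leaves it as 0x80 since the read
    above already zero-extended it. */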
600
601 /*
602 * Store the result to register (parameter 1).
603 */
604 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
605 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
606 }
607
608 if (rc == VINF_SUCCESS)
609 iomMMIOStatLength(pVM, cb);
610 return rc;
611}
612
613
614/**
615 * MOV mem, reg|imm (write)
616 *
617 * @returns VBox status code.
618 *
619 * @param pVM The virtual machine.
620 * @param pVCpu Pointer to the virtual CPU structure of the caller.
621 * @param pRegFrame Pointer to the CPUMCTXCORE guest register structure.
622 * @param pCpu Disassembler CPU state.
623 * @param pRange Pointer to the MMIO range.
624 * @param GCPhysFault The GC physical address corresponding to pvFault.
625 */
626static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
627 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
628{
629 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
630
631 /*
632 * Get data to write from second parameter,
633 * and call the callback to write it.
634 */
635 unsigned cb = 0;
636 uint64_t u64Data = 0;
637 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
638 AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);
639
640 int rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb);
641 if (rc == VINF_SUCCESS)
642 iomMMIOStatLength(pVM, cb);
643 return rc;
644}
645
646
647/** Wrapper for reading virtual memory. */
648DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
649{
650 /* Note: This will fail in R0 or RC if it hits an access handler. That
651 isn't a problem though since the operation can be restarted in REM. */
652#ifdef IN_RC
653 NOREF(pVCpu);
654 int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
655 /* Page may be protected and not directly accessible. */
656 if (rc == VERR_ACCESS_DENIED)
657 rc = VINF_IOM_R3_IOPORT_WRITE;
658 return rc;
659#else
660 return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
661#endif
662}
663
664
665/** Wrapper for writing virtual memory. */
666DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
667{
668 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
669 * raw mode code. Some thought needs to be spent on theoretical concurrency issues
670 * as well since we're not behind the pgm lock and the handler may change between calls.
671 *
672 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
673 * the state of some shadowed structures. */
674#if defined(IN_RING0) || defined(IN_RC)
675 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
676#else
677 NOREF(pCtxCore);
678 return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
679#endif
680}
681
682
683#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
684/**
685 * [REP] MOVSB
686 * [REP] MOVSW
687 * [REP] MOVSD
688 *
689 * Restricted implementation.
690 *
691 *
692 * @returns VBox status code.
693 *
694 * @param pVM The virtual machine.
695 * @param uErrorCode CPU Error code.
696 * @param pRegFrame Trap register frame.
697 * @param GCPhysFault The GC physical address corresponding to pvFault.
698 * @param pCpu Disassembler CPU state.
699 * @param pRange Pointer to the MMIO range.
700 * @param ppStat Which sub-sample to attribute this call to.
701 */
702static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
703 PSTAMPROFILE *ppStat)
704{
705 /*
706 * We do not support segment prefixes or REPNE.
707 */
708 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
709 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
710
711 PVMCPU pVCpu = VMMGetCpu(pVM);
712
713 /*
714 * Get bytes/words/dwords/qword count to copy.
715 */
716 uint32_t cTransfers = 1;
717 if (pCpu->fPrefix & DISPREFIX_REP)
718 {
719#ifndef IN_RC
720 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
721 && pRegFrame->rcx >= _4G)
722 return VINF_EM_RAW_EMULATE_INSTR;
723#endif
724
725 cTransfers = pRegFrame->ecx;
726 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
727 cTransfers &= 0xffff;
728
729 if (!cTransfers)
730 return VINF_SUCCESS;
731 }
732
733 /* Get the current privilege level. */
734 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
735
736 /*
737 * Get data size.
738 */
739 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
740 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
741 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
742
743#ifdef VBOX_WITH_STATISTICS
744 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
745 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
746#endif
747
748/** @todo re-evaluate on page boundaries. */
749
750 RTGCPHYS Phys = GCPhysFault;
751 int rc;
752 if (fWriteAccess)
753 {
754 /*
755 * Write operation: [Mem] -> [MMIO]
756 * ds:esi (Virt Src) -> es:edi (Phys Dst)
757 */
758 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
759
760 /* Check callback. */
761 if (!pRange->CTX_SUFF(pfnWriteCallback))
762 return VINF_IOM_R3_MMIO_WRITE;
763
764 /* Convert source address ds:esi. */
765 RTGCUINTPTR pu8Virt;
766 rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
767 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
768 (PRTGCPTR)&pu8Virt);
769 if (RT_SUCCESS(rc))
770 {
771
772 /* Access verification first; we currently can't recover properly from traps inside this instruction */
773 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
774 if (rc != VINF_SUCCESS)
775 {
776 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
777 return VINF_EM_RAW_EMULATE_INSTR;
778 }
779
780#ifdef IN_RC
781 MMGCRamRegisterTrapHandler(pVM);
782#endif
783
784 /* copy loop. */
785 while (cTransfers)
786 {
787 uint32_t u32Data = 0;
788 rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
789 if (rc != VINF_SUCCESS)
790 break;
791 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
792 if (rc != VINF_SUCCESS)
793 break;
794
795 pu8Virt += offIncrement;
796 Phys += offIncrement;
797 pRegFrame->rsi += offIncrement;
798 pRegFrame->rdi += offIncrement;
799 cTransfers--;
800 }
801#ifdef IN_RC
802 MMGCRamDeregisterTrapHandler(pVM);
803#endif
804 /* Update ecx. */
805 if (pCpu->fPrefix & DISPREFIX_REP)
806 pRegFrame->ecx = cTransfers;
807 }
808 else
809 rc = VINF_IOM_R3_MMIO_READ_WRITE;
810 }
811 else
812 {
813 /*
814 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
815 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
816 */
817 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
818
819 /* Check callback. */
820 if (!pRange->CTX_SUFF(pfnReadCallback))
821 return VINF_IOM_R3_MMIO_READ;
822
823 /* Convert destination address. */
824 RTGCUINTPTR pu8Virt;
825 rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
826 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
827 (RTGCPTR *)&pu8Virt);
828 if (RT_FAILURE(rc))
829 return VINF_IOM_R3_MMIO_READ;
830
831 /* Check if destination address is MMIO. */
832 PIOMMMIORANGE pMMIODst;
833 RTGCPHYS PhysDst;
834 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
835 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
836 if ( RT_SUCCESS(rc)
837 && (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
838 {
839 /** @todo implement per-device locks for MMIO access. */
840 Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
841
842 /*
843 * Extra: [MMIO] -> [MMIO]
844 */
845 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
846 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
847 {
848 iomMmioReleaseRange(pVM, pRange);
849 return VINF_IOM_R3_MMIO_READ_WRITE;
850 }
851
852 /* copy loop. */
853 while (cTransfers)
854 {
855 uint32_t u32Data;
856 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
857 if (rc != VINF_SUCCESS)
858 break;
859 rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
860 if (rc != VINF_SUCCESS)
861 break;
862
863 Phys += offIncrement;
864 PhysDst += offIncrement;
865 pRegFrame->rsi += offIncrement;
866 pRegFrame->rdi += offIncrement;
867 cTransfers--;
868 }
869 iomMmioReleaseRange(pVM, pRange);
870 }
871 else
872 {
873 /*
874 * Normal: [MMIO] -> [Mem]
875 */
876 /* Access verification first; we currently can't recover properly from traps inside this instruction */
877 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
878 if (rc != VINF_SUCCESS)
879 {
880 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
881 return VINF_EM_RAW_EMULATE_INSTR;
882 }
883
884 /* copy loop. */
885#ifdef IN_RC
886 MMGCRamRegisterTrapHandler(pVM);
887#endif
888 while (cTransfers)
889 {
890 uint32_t u32Data;
891 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
892 if (rc != VINF_SUCCESS)
893 break;
894 rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
895 if (rc != VINF_SUCCESS)
896 {
897 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
898 break;
899 }
900
901 pu8Virt += offIncrement;
902 Phys += offIncrement;
903 pRegFrame->rsi += offIncrement;
904 pRegFrame->rdi += offIncrement;
905 cTransfers--;
906 }
907#ifdef IN_RC
908 MMGCRamDeregisterTrapHandler(pVM);
909#endif
910 }
911
912 /* Update ecx on exit. */
913 if (pCpu->fPrefix & DISPREFIX_REP)
914 pRegFrame->ecx = cTransfers;
915 }
916
917 /* work statistics. */
918 if (rc == VINF_SUCCESS)
919 iomMMIOStatLength(pVM, cb);
920 NOREF(ppStat);
921 return rc;
922}
923#endif /* IOM_WITH_MOVS_SUPPORT */
924
925
926/**
927 * Gets the address / opcode mask corresponding to the given CPU mode.
928 *
929 * @returns Mask.
930 * @param enmCpuMode CPU mode.
931 */
932static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
933{
934 switch (enmCpuMode)
935 {
936 case DISCPUMODE_16BIT: return UINT16_MAX;
937 case DISCPUMODE_32BIT: return UINT32_MAX;
938 case DISCPUMODE_64BIT: return UINT64_MAX;
939 default:
940 AssertFailedReturn(UINT32_MAX);
941 }
942}
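
/*
 * Example: in 16-bit code the mask is 0xffff, so (pRegFrame->rcx & fAddrMask)
 * extracts CX while (pRegFrame->rcx & ~fAddrMask) preserves the upper register
 * bits when a result is merged back in.
 */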
943
944
945/**
946 * [REP] STOSB
947 * [REP] STOSW
948 * [REP] STOSD
949 *
950 * Restricted implementation.
951 *
952 *
953 * @returns VBox status code.
954 *
955 * @param pVM The virtual machine.
956 * @param pVCpu Pointer to the virtual CPU structure of the caller.
957 * @param pRegFrame Trap register frame.
958 * @param GCPhysFault The GC physical address corresponding to pvFault.
959 * @param pCpu Disassembler CPU state.
960 * @param pRange Pointer to the MMIO range.
961 */
962static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
963 PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
964{
965 /*
966 * We do not support segment prefixes or REPNE.
967 */
968 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
969 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
970
971 /*
972 * Get bytes/words/dwords/qwords count to copy.
973 */
974 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
975 RTGCUINTREG cTransfers = 1;
976 if (pCpu->fPrefix & DISPREFIX_REP)
977 {
978#ifndef IN_RC
979 if ( CPUMIsGuestIn64BitCode(VMMGetCpu(pVM))
980 && pRegFrame->rcx >= _4G)
981 return VINF_EM_RAW_EMULATE_INSTR;
982#endif
983
984 cTransfers = pRegFrame->rcx & fAddrMask;
985 if (!cTransfers)
986 return VINF_SUCCESS;
987 }
988
989/** @todo r=bird: bounds checks! */
990
991 /*
992 * Get data size.
993 */
994 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
995 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
996 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
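 /* Example: with cb == 4 and EFLAGS.DF clear, offIncrement is +4 and the string
    operation walks up through memory; with DF set it is -4 and walks down. */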
997
998#ifdef VBOX_WITH_STATISTICS
999 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
1000 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
1001#endif
1002
1003
1004 RTGCPHYS Phys = GCPhysFault;
1005 int rc;
1006 if ( pRange->CTX_SUFF(pfnFillCallback)
1007 && cb <= 4 /* can only fill 32-bit values */)
1008 {
1009 /*
1010 * Use the fill callback.
1011 */
1012 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
1013 if (offIncrement > 0)
1014 {
1015 /* addr++ variant. */
1016 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
1017 pRegFrame->eax, cb, cTransfers);
1018 if (rc == VINF_SUCCESS)
1019 {
1020 /* Update registers. */
1021 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1022 | (pRegFrame->rdi & ~fAddrMask);
1023 if (pCpu->fPrefix & DISPREFIX_REP)
1024 pRegFrame->rcx &= ~fAddrMask;
1025 }
1026 }
1027 else
1028 {
1029 /* addr-- variant. */
1030 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1031 Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
1032 pRegFrame->eax, cb, cTransfers);
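 /* Example: a 3-transfer STOSD with DF set and Phys == 0x1008 touches 0x1008,
    0x1004 and 0x1000, so the callback is handed the lowest address:
    0x1008 - (2 << 2) = 0x1000. */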
1033 if (rc == VINF_SUCCESS)
1034 {
1035 /* Update registers. */
1036 pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1037 | (pRegFrame->rdi & ~fAddrMask);
1038 if (pCpu->fPrefix & DISPREFIX_REP)
1039 pRegFrame->rcx &= ~fAddrMask;
1040 }
1041 }
1042 }
1043 else
1044 {
1045 /*
1046 * Use the write callback.
1047 */
1048 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
1049 uint64_t u64Data = pRegFrame->rax;
1050
1051 /* fill loop. */
1052 do
1053 {
1054 rc = iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb);
1055 if (rc != VINF_SUCCESS)
1056 break;
1057
1058 Phys += offIncrement;
1059 pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
1060 | (pRegFrame->rdi & ~fAddrMask);
1061 cTransfers--;
1062 } while (cTransfers);
1063
1064 /* Update rcx on exit. */
1065 if (pCpu->fPrefix & DISPREFIX_REP)
1066 pRegFrame->rcx = (cTransfers & fAddrMask)
1067 | (pRegFrame->rcx & ~fAddrMask);
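 /* Example: a 16-bit REP STOSW that stops early with cTransfers == 5 and
    rcx == 0x12340009 leaves rcx as 0x12340005; only CX is updated. */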
1068 }
1069
1070 /*
1071 * Work statistics and return.
1072 */
1073 if (rc == VINF_SUCCESS)
1074 iomMMIOStatLength(pVM, cb);
1075 return rc;
1076}
1077
1078
1079/**
1080 * [REP] LODSB
1081 * [REP] LODSW
1082 * [REP] LODSD
1083 *
1084 * Restricted implementation.
1085 *
1086 *
1087 * @returns VBox status code.
1088 *
1089 * @param pVM The virtual machine.
1090 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1091 * @param pRegFrame Trap register frame.
1092 * @param GCPhysFault The GC physical address corresponding to pvFault.
1093 * @param pCpu Disassembler CPU state.
1094 * @param pRange Pointer to the MMIO range.
1095 */
1096static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1097 PIOMMMIORANGE pRange)
1098{
1099 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1100
1101 /*
1102 * We do not support segment prefixes or REP*.
1103 */
1104 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
1105 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1106
1107 /*
1108 * Get data size.
1109 */
1110 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
1111 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1112 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1113
1114 /*
1115 * Perform read.
1116 */
1117 int rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb);
1118 if (rc == VINF_SUCCESS)
1119 {
1120 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1121 pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
1122 | (pRegFrame->rsi & ~fAddrMask);
1123 }
1124
1125 /*
1126 * Work statistics and return.
1127 */
1128 if (rc == VINF_SUCCESS)
1129 iomMMIOStatLength(pVM, cb);
1130 return rc;
1131}
1132
1133
1134/**
1135 * CMP [MMIO], reg|imm
1136 * CMP reg|imm, [MMIO]
1137 *
1138 * Restricted implementation.
1139 *
1140 *
1141 * @returns VBox status code.
1142 *
1143 * @param pVM The virtual machine.
1144 * @param pRegFrame Trap register frame.
1145 * @param GCPhysFault The GC physical address corresponding to pvFault.
1146 * @param pCpu Disassembler CPU state.
1147 * @param pRange Pointer to the MMIO range.
1148 */
1149static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1150 PIOMMMIORANGE pRange)
1151{
1152 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1153
1154 /*
1155 * Get the operands.
1156 */
1157 unsigned cb = 0;
1158 uint64_t uData1 = 0;
1159 uint64_t uData2 = 0;
1160 int rc;
1161 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1162 /* cmp reg, [MMIO]. */
1163 rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb);
1164 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1165 /* cmp [MMIO], reg|imm. */
1166 rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
1167 else
1168 {
1169 AssertMsgFailed(("Disassembler CMP problem...\n"));
1170 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1171 }
1172
1173 if (rc == VINF_SUCCESS)
1174 {
1175#if HC_ARCH_BITS == 32
1176 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1177 if (cb > 4)
1178 return VINF_IOM_R3_MMIO_READ_WRITE;
1179#endif
1180 /* Emulate CMP and update guest flags. */
1181 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
1182 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1183 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
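 /* Example: EMEmulateCmp computes the flags of uData1 - uData2; the merge above
    copies only the six arithmetic status flags into the guest context and
    leaves IF, TF and the rest untouched. */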
1184 iomMMIOStatLength(pVM, cb);
1185 }
1186
1187 return rc;
1188}
1189
1190
1191/**
1192 * AND [MMIO], reg|imm
1193 * AND reg, [MMIO]
1194 * OR [MMIO], reg|imm
1195 * OR reg, [MMIO]
1196 *
1197 * Restricted implementation.
1198 *
1199 *
1200 * @returns VBox status code.
1201 *
1202 * @param pVM The virtual machine.
1203 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1204 * @param pRegFrame Trap register frame.
1205 * @param GCPhysFault The GC physical address corresponding to pvFault.
1206 * @param pCpu Disassembler CPU state.
1207 * @param pRange Pointer to the MMIO range.
1208 * @param pfnEmulate Instruction emulation function.
1209 */
1210static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1211 PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
1212{
1213 unsigned cb = 0;
1214 uint64_t uData1 = 0;
1215 uint64_t uData2 = 0;
1216 bool fAndWrite;
1217 int rc;
1218
1219#ifdef LOG_ENABLED
1220 const char *pszInstr;
1221
1222 if (pCpu->pCurInstr->uOpcode == OP_XOR)
1223 pszInstr = "Xor";
1224 else if (pCpu->pCurInstr->uOpcode == OP_OR)
1225 pszInstr = "Or";
1226 else if (pCpu->pCurInstr->uOpcode == OP_AND)
1227 pszInstr = "And";
1228 else
1229 pszInstr = "OrXorAnd??";
1230#endif
1231
1232 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1233 {
1234#if HC_ARCH_BITS == 32
1235 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1236 if (cb > 4)
1237 return VINF_IOM_R3_MMIO_READ_WRITE;
1238#endif
1239 /* and reg, [MMIO]. */
1240 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1241 fAndWrite = false;
1242 rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb);
1243 }
1244 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1245 {
1246#if HC_ARCH_BITS == 32
1247 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1248 if (cb > 4)
1249 return VINF_IOM_R3_MMIO_READ_WRITE;
1250#endif
1251 /* and [MMIO], reg|imm. */
1252 fAndWrite = true;
1253 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
1254 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
1255 rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
1256 else
1257 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1258 }
1259 else
1260 {
1261 AssertMsgFailed(("Disassembler AND problem...\n"));
1262 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1263 }
1264
1265 if (rc == VINF_SUCCESS)
1266 {
1267 /* Emulate AND and update guest flags. */
1268 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
1269
1270 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
1271
1272 if (fAndWrite)
1273 /* Store result to MMIO. */
1274 rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
1275 else
1276 {
1277 /* Store result to register. */
1278 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
1279 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1280 }
1281 if (rc == VINF_SUCCESS)
1282 {
1283 /* Update guest's eflags and finish. */
1284 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1285 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1286 iomMMIOStatLength(pVM, cb);
1287 }
1288 }
1289
1290 return rc;
1291}
1292
1293
1294/**
1295 * TEST [MMIO], reg|imm
1296 * TEST reg, [MMIO]
1297 *
1298 * Restricted implementation.
1299 *
1300 *
1301 * @returns VBox status code.
1302 *
1303 * @param pVM The virtual machine.
1304 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1305 * @param pRegFrame Trap register frame.
1306 * @param GCPhysFault The GC physical address corresponding to pvFault.
1307 * @param pCpu Disassembler CPU state.
1308 * @param pRange Pointer to the MMIO range.
1309 */
1310static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1311 PIOMMMIORANGE pRange)
1312{
1313 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1314
1315 unsigned cb = 0;
1316 uint64_t uData1 = 0;
1317 uint64_t uData2 = 0;
1318 int rc;
1319
1320 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1321 {
1322 /* test reg, [MMIO]. */
1323 rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb);
1324 }
1325 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1326 {
1327 /* test [MMIO], reg|imm. */
1328 rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
1329 }
1330 else
1331 {
1332 AssertMsgFailed(("Disassembler TEST problem...\n"));
1333 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1334 }
1335
1336 if (rc == VINF_SUCCESS)
1337 {
1338#if HC_ARCH_BITS == 32
1339 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1340 if (cb > 4)
1341 return VINF_IOM_R3_MMIO_READ_WRITE;
1342#endif
1343
1344 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
1345 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
1346 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1347 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1348 iomMMIOStatLength(pVM, cb);
1349 }
1350
1351 return rc;
1352}
1353
1354
1355/**
1356 * BT [MMIO], reg|imm
1357 *
1358 * Restricted implementation.
1359 *
1360 *
1361 * @returns VBox status code.
1362 *
1363 * @param pVM The virtual machine.
1364 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1365 * @param pRegFrame Trap register frame.
1366 * @param GCPhysFault The GC physical address corresponding to pvFault.
1367 * @param pCpu Disassembler CPU state.
1368 * @param pRange Pointer to the MMIO range.
1369 */
1370static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1371 PIOMMMIORANGE pRange)
1372{
1373 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1374
1375 uint64_t uBit = 0;
1376 uint64_t uData = 0;
1377 unsigned cbIgnored;
1378
1379 if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
1380 {
1381 AssertMsgFailed(("Disassembler BT problem...\n"));
1382 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1383 }
1384 /* The size of the memory operand only matters here. */
1385 unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);
1386
1387 /* bt [MMIO], reg|imm. */
1388 int rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData);
1389 if (rc == VINF_SUCCESS)
1390 {
1391 /* Test the bit; assigning to the 1-bit u1CF field implicitly masks (uData >> uBit) with 1. */
1392 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
1393 iomMMIOStatLength(pVM, cbData);
1394 }
1395
1396 return rc;
1397}
1398
1399/**
1400 * XCHG [MMIO], reg
1401 * XCHG reg, [MMIO]
1402 *
1403 * Restricted implementation.
1404 *
1405 *
1406 * @returns VBox status code.
1407 *
1408 * @param pVM The virtual machine.
1409 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1410 * @param pRegFrame Trap register frame.
1411 * @param GCPhysFault The GC physical address corresponding to pvFault.
1412 * @param pCpu Disassembler CPU state.
1413 * @param pRange Pointer to the MMIO range.
1414 */
1415static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
1416 PIOMMMIORANGE pRange)
1417{
1418 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
1419 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1420 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
1421 return VINF_IOM_R3_MMIO_READ_WRITE;
1422
1423 int rc;
1424 unsigned cb = 0;
1425 uint64_t uData1 = 0;
1426 uint64_t uData2 = 0;
1427 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1428 {
1429 /* xchg reg, [MMIO]. */
1430 rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb);
1431 if (rc == VINF_SUCCESS)
1432 {
1433 /* Store result to MMIO. */
1434 rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
1435
1436 if (rc == VINF_SUCCESS)
1437 {
1438 /* Store result to register. */
1439 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
1440 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1441 }
1442 else
1443 Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1444 }
1445 else
1446 Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1447 }
1448 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1449 {
1450 /* xchg [MMIO], reg. */
1451 rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
1452 if (rc == VINF_SUCCESS)
1453 {
1454 /* Store result to MMIO. */
1455 rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb);
1456 if (rc == VINF_SUCCESS)
1457 {
1458 /* Store result to register. */
1459 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
1460 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1461 }
1462 else
1463 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
1464 }
1465 else
1466 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
1467 }
1468 else
1469 {
1470 AssertMsgFailed(("Disassembler XCHG problem...\n"));
1471 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1472 }
1473 return rc;
1474}
1475
1476
1477/**
1478 * \#PF Handler callback for MMIO ranges.
1479 *
1480 * @returns VBox status code (appropriate for GC return).
1481 * @param pVM Pointer to the VM.
1482 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1483 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1484 * any error code (the EPT misconfig hack).
1485 * @param pCtxCore Trap register frame.
1486 * @param GCPhysFault The GC physical address corresponding to pvFault.
1487 * @param pvUser Pointer to the MMIO ring-3 range entry.
1488 */
1489static int iomMMIOHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
1490{
1491 int rc = IOM_LOCK(pVM);
1492#ifndef IN_RING3
1493 if (rc == VERR_SEM_BUSY)
1494 return VINF_IOM_R3_MMIO_READ_WRITE;
1495#endif
1496 AssertRC(rc);
1497
1498 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1499 Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1500
1501 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1502 Assert(pRange);
1503 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1504
1505#ifdef VBOX_WITH_STATISTICS
1506 /*
1507 * Locate the statistics. If the range is larger than PAGE_SIZE, the first byte's entry is used for everything.
1508 */
1509 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
1510 if (!pStats)
1511 {
1512# ifdef IN_RING3
1513 IOM_UNLOCK(pVM);
1514 return VERR_NO_MEMORY;
1515# else
1516 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1517 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1518 IOM_UNLOCK(pVM);
1519 return VINF_IOM_R3_MMIO_READ_WRITE;
1520# endif
1521 }
1522#endif
1523
1524#ifndef IN_RING3
1525 /*
1526 * Should we defer the request right away? This isn't usually the case, so
1527 * do the simple test first and then try to deal with uErrorCode being N/A.
1528 */
1529 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1530 || !pRange->CTX_SUFF(pfnReadCallback))
1531 && ( uErrorCode == UINT32_MAX
1532 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1533 : uErrorCode & X86_TRAP_PF_RW
1534 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1535 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1536 )
1537 )
1538 )
1539 {
1540 if (uErrorCode & X86_TRAP_PF_RW)
1541 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1542 else
1543 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1544
1545 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1546 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1547 IOM_UNLOCK(pVM);
1548 return VINF_IOM_R3_MMIO_READ_WRITE;
1549 }
1550#endif /* !IN_RING3 */
1551
1552 /*
1553 * Retain the range and do locking.
1554 */
1555 iomMmioRetainRange(pRange);
1556 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1557 IOM_UNLOCK(pVM);
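 /* Note: the range is retained and the IOM lock dropped before entering the
    device's critical section, so the range cannot be freed while we wait and
    the IOM lock is never held across device code. */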
1558 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1559 if (rc != VINF_SUCCESS)
1560 {
1561 iomMmioReleaseRange(pVM, pRange);
1562 return rc;
1563 }
1564
1565 /*
1566 * Disassemble the instruction and interpret it.
1567 */
1568 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1569 unsigned cbOp;
1570 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1571 if (RT_FAILURE(rc))
1572 {
1573 iomMmioReleaseRange(pVM, pRange);
1574 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1575 return rc;
1576 }
1577 switch (pDis->pCurInstr->uOpcode)
1578 {
1579 case OP_MOV:
1580 case OP_MOVZX:
1581 case OP_MOVSX:
1582 {
1583 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1584 AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
1585 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1586 ? uErrorCode & X86_TRAP_PF_RW
1587 : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
1588 rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1589 else
1590 rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1591 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1592 break;
1593 }
1594
1595
1596#ifdef IOM_WITH_MOVS_SUPPORT
1597 case OP_MOVSB:
1598 case OP_MOVSWD:
1599 {
1600 if (uErrorCode == UINT32_MAX)
1601 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1602 else
1603 {
1604 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1605 PSTAMPROFILE pStat = NULL;
1606 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1607 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1608 }
1609 break;
1610 }
1611#endif
1612
1613 case OP_STOSB:
1614 case OP_STOSWD:
1615 Assert(uErrorCode & X86_TRAP_PF_RW);
1616 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1617 rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1618 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1619 break;
1620
1621 case OP_LODSB:
1622 case OP_LODSWD:
1623 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1624 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1625 rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1626 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1627 break;
1628
1629 case OP_CMP:
1630 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1631 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1632 rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1633 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1634 break;
1635
1636 case OP_AND:
1637 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1638 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1639 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1640 break;
1641
1642 case OP_OR:
1643 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1644 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1645 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1646 break;
1647
1648 case OP_XOR:
1649 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1650 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1651 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1652 break;
1653
1654 case OP_TEST:
1655 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1656 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1657 rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1658 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1659 break;
1660
1661 case OP_BT:
1662 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1663 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1664 rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1665 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1666 break;
1667
1668 case OP_XCHG:
1669 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1670 rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1671 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1672 break;
1673
1674
1675 /*
1676 * The instruction isn't supported. Hand it on to ring-3.
1677 */
1678 default:
1679 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1680 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1681 break;
1682 }
1683
1684 /*
1685 * On success advance EIP.
1686 */
1687 if (rc == VINF_SUCCESS)
1688 pCtxCore->rip += cbOp;
1689 else
1690 {
1691 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1692#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1693 switch (rc)
1694 {
1695 case VINF_IOM_R3_MMIO_READ:
1696 case VINF_IOM_R3_MMIO_READ_WRITE:
1697 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1698 break;
1699 case VINF_IOM_R3_MMIO_WRITE:
1700 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1701 break;
1702 }
1703#endif
1704 }
1705
1706 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1707 iomMmioReleaseRange(pVM, pRange);
1708 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1709 return rc;
1710}
1711
1712/**
1713 * \#PF Handler callback for MMIO ranges.
1714 *
1715 * @returns VBox status code (appropriate for GC return).
1716 * @param pVM Pointer to the VM.
1717 * @param uErrorCode CPU Error code.
1718 * @param pCtxCore Trap register frame.
1719 * @param pvFault The fault address (cr2).
1720 * @param GCPhysFault The GC physical address corresponding to pvFault.
1721 * @param pvUser Pointer to the MMIO ring-3 range entry.
1722 */
1723VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1724{
1725 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1726 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1727 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, VMMGetCpu(pVM), (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1728 return VBOXSTRICTRC_VAL(rcStrict);
1729}
1730
1731/**
1732 * Physical access handler for MMIO ranges.
1733 *
1734 * @returns VBox status code (appropriate for GC return).
1735 * @param pVM Pointer to the VM.
1736 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1737 * @param uErrorCode CPU Error code.
1738 * @param pCtxCore Trap register frame.
1739 * @param GCPhysFault The GC physical address.
1740 */
1741VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1742{
1743 int rc2 = IOM_LOCK(pVM); NOREF(rc2);
1744#ifndef IN_RING3
1745 if (rc2 == VERR_SEM_BUSY)
1746 return VINF_IOM_R3_MMIO_READ_WRITE;
1747#endif
1748 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault,
1749 iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1750 IOM_UNLOCK(pVM);
1751 return VBOXSTRICTRC_VAL(rcStrict);
1752}
1753
1754
1755#ifdef IN_RING3
1756/**
1757 * \#PF Handler callback for MMIO ranges.
1758 *
1759 * @returns VINF_SUCCESS if the handler has carried out the operation.
1760 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1761 * @param pVM Pointer to the VM.
1762 * @param GCPhysFault The physical address the guest is accessing.
1763 * @param pvPhys The HC mapping of that address.
1764 * @param pvBuf What the guest is reading/writing.
1765 * @param cbBuf How much it's reading/writing.
1766 * @param enmAccessType The access type.
1767 * @param pvUser Pointer to the MMIO range entry.
1768 */
1769DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
1770 PGMACCESSTYPE enmAccessType, void *pvUser)
1771{
1772 PVMCPU pVCpu = VMMGetCpu(pVM);
1773 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1774 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1775
1776 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1777 AssertPtr(pRange);
1778 NOREF(pvPhys);
1779
1780 /*
1781 * Validate the range.
1782 */
1783 int rc = IOM_LOCK(pVM);
1784 AssertRC(rc);
1785 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1786
1787 /*
1788 * Perform locking.
1789 */
1790 iomMmioRetainRange(pRange);
1791 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1792 IOM_UNLOCK(pVM);
1793 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1794 if (rc != VINF_SUCCESS)
1795 {
1796 iomMmioReleaseRange(pVM, pRange);
1797 return rc;
1798 }
1799
1800 /*
1801 * Perform the access.
1802 */
1803 if (enmAccessType == PGMACCESSTYPE_READ)
1804 rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1805 else
1806 rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1807
1808 AssertRC(rc);
1809 iomMmioReleaseRange(pVM, pRange);
1810 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1811 return rc;
1812}
1813#endif /* IN_RING3 */
1814
1815
1816/**
1817 * Reads a MMIO register.
1818 *
1819 * @returns VBox status code.
1820 *
1821 * @param pVM Pointer to the VM.
1822 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1823 * @param GCPhys The physical address to read.
1824 * @param pu32Value Where to store the value read.
1825 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1826 */
1827VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1828{
1829 /* Take the IOM lock before performing any MMIO. */
1830 VBOXSTRICTRC rc = IOM_LOCK(pVM);
1831#ifndef IN_RING3
1832 if (rc == VERR_SEM_BUSY)
1833 return VINF_IOM_R3_MMIO_READ;
1834#endif
1835 AssertRC(VBOXSTRICTRC_VAL(rc));
1836#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1837 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
1838#endif
1839
1840 /*
1841 * Lookup the current context range node and statistics.
1842 */
1843 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
1844 if (!pRange)
1845 {
1846 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1847 IOM_UNLOCK(pVM);
1848 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1849 }
1850#ifdef VBOX_WITH_STATISTICS
1851 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
1852 if (!pStats)
1853 {
1854 IOM_UNLOCK(pVM);
1855# ifdef IN_RING3
1856 return VERR_NO_MEMORY;
1857# else
1858 return VINF_IOM_R3_MMIO_READ;
1859# endif
1860 }
1861 STAM_COUNTER_INC(&pStats->Accesses);
1862#endif /* VBOX_WITH_STATISTICS */
1863
1864 if (pRange->CTX_SUFF(pfnReadCallback))
1865 {
1866 /*
1867 * Perform locking.
1868 */
1869 iomMmioRetainRange(pRange);
1870 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1871 IOM_UNLOCK(pVM);
1872 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
1873 if (rc != VINF_SUCCESS)
1874 {
1875 iomMmioReleaseRange(pVM, pRange);
1876 return rc;
1877 }
1878
1879 /*
1880 * Perform the read and deal with the result.
1881 */
1882 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
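 /* Aligned dword/qword accesses and pass-through ranges go straight to the
 device callback; everything else takes iomMMIODoComplicatedRead, which
 splits and pads the access according to the range's read mode flags. */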
1883 if ( (cbValue == 4 && !(GCPhys & 3))
1884 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
1885 || (cbValue == 8 && !(GCPhys & 7)) )
1886 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
1887 pu32Value, (unsigned)cbValue);
1888 else
1889 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
1890 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1891 switch (VBOXSTRICTRC_VAL(rc))
1892 {
1893 case VINF_SUCCESS:
1894 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1895 iomMmioReleaseRange(pVM, pRange);
1896 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1897 return rc;
1898#ifndef IN_RING3
1899 case VINF_IOM_R3_MMIO_READ:
1900 case VINF_IOM_R3_MMIO_READ_WRITE:
1901 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1902#endif
1903 default:
1904 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1905 iomMmioReleaseRange(pVM, pRange);
1906 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1907 return rc;
1908
1909 case VINF_IOM_MMIO_UNUSED_00:
1910 iomMMIODoRead00s(pu32Value, cbValue);
1911 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1912 iomMmioReleaseRange(pVM, pRange);
1913 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1914 return VINF_SUCCESS;
1915
1916 case VINF_IOM_MMIO_UNUSED_FF:
1917 iomMMIODoReadFFs(pu32Value, cbValue);
1918 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1919 iomMmioReleaseRange(pVM, pRange);
1920 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1921 return VINF_SUCCESS;
1922 }
1923 /* not reached */
1924 }
1925#ifndef IN_RING3
1926 if (pRange->pfnReadCallbackR3)
1927 {
1928 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1929 IOM_UNLOCK(pVM);
1930 return VINF_IOM_R3_MMIO_READ;
1931 }
1932#endif
1933
1934 /*
1935 * Unassigned memory - this is actually not supposed to happen...
1936 */
1937 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1938 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1939 iomMMIODoReadFFs(pu32Value, cbValue);
1940 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1941 IOM_UNLOCK(pVM);
1942 return VINF_SUCCESS;
1943}
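/*
 * Usage sketch (illustration only; GCPhysReg and the calling context are
 * assumptions): reading a 32-bit register and handling the ring-3 deferral
 * statuses an R0/RC caller must expect.
 *
 *     uint32_t u32Value;
 *     VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, GCPhysReg, &u32Value, sizeof(u32Value));
 *     if (rcStrict == VINF_IOM_R3_MMIO_READ || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
 *         return rcStrict;   (redo the access in ring-3)
 *     AssertRCSuccess(VBOXSTRICTRC_VAL(rcStrict));
 */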
1944
1945
1946/**
1947 * Writes to a MMIO register.
1948 *
1949 * @returns VBox status code.
1950 *
1951 * @param pVM Pointer to the VM.
1952 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1953 * @param GCPhys The physical address to write to.
1954 * @param u32Value The value to write.
1955 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
1956 */
1957VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1958{
1959 /* Take the IOM lock before performing any MMIO. */
1960 VBOXSTRICTRC rc = IOM_LOCK(pVM);
1961#ifndef IN_RING3
1962 if (rc == VERR_SEM_BUSY)
1963 return VINF_IOM_R3_MMIO_WRITE;
1964#endif
1965 AssertRC(VBOXSTRICTRC_VAL(rc));
1966#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1967 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
1968#endif
1969
1970 /*
1971 * Lookup the current context range node.
1972 */
1973 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
1974 if (!pRange)
1975 {
1976 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1977 IOM_UNLOCK(pVM);
1978 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1979 }
1980#ifdef VBOX_WITH_STATISTICS
1981 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
1982 if (!pStats)
1983 {
1984 IOM_UNLOCK(pVM);
1985# ifdef IN_RING3
1986 return VERR_NO_MEMORY;
1987# else
1988 return VINF_IOM_R3_MMIO_WRITE;
1989# endif
1990 }
1991 STAM_COUNTER_INC(&pStats->Accesses);
1992#endif /* VBOX_WITH_STATISTICS */
1993
1994 if (pRange->CTX_SUFF(pfnWriteCallback))
1995 {
1996 /*
1997 * Perform locking.
1998 */
1999 iomMmioRetainRange(pRange);
2000 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2001 IOM_UNLOCK(pVM);
2002 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2003 if (rc != VINF_SUCCESS)
2004 {
2005 iomMmioReleaseRange(pVM, pRange);
2006 return rc;
2007 }
2008
2009 /*
2010 * Perform the write.
2011 */
2012 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
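 /* As with reads: aligned dword/qword accesses and pass-through ranges hit
 the device callback directly; everything else goes through
 iomMMIODoComplicatedWrite and its read-modify-write splitting logic. */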
2013 if ( (cbValue == 4 && !(GCPhys & 3))
2014 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2015 || (cbValue == 8 && !(GCPhys & 7)) )
2016 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2017 GCPhys, &u32Value, (unsigned)cbValue);
2018 else
2019 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2020 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2021#ifndef IN_RING3
2022 if ( rc == VINF_IOM_R3_MMIO_WRITE
2023 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2024 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2025#endif
2026 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2027 iomMmioReleaseRange(pVM, pRange);
2028 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2029 return rc;
2030 }
2031#ifndef IN_RING3
2032 if (pRange->pfnWriteCallbackR3)
2033 {
2034 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2035 IOM_UNLOCK(pVM);
2036 return VINF_IOM_R3_MMIO_WRITE;
2037 }
2038#endif
2039
2040 /*
2041 * No write handler, nothing to do.
2042 */
2043 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2044 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2045 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2046 IOM_UNLOCK(pVM);
2047 return VINF_SUCCESS;
2048}
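/*
 * Usage sketch (illustration only; the register offset and value are
 * assumptions): writing a 16-bit register and deferring to ring-3 when the
 * strict status asks for it.
 *
 *     VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, pVCpu, GCPhysMmioBase + 0x10, 0x1234, 2);
 *     if (rcStrict == VINF_IOM_R3_MMIO_WRITE || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
 *         return rcStrict;
 *     AssertRCSuccess(VBOXSTRICTRC_VAL(rcStrict));
 */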
2049
2050
2051/**
2052 * [REP*] INSB/INSW/INSD
2053 * ES:EDI,DX[,ECX]
2054 *
2055 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2056 *
2057 * @returns Strict VBox status code. Informational status codes other than the one documented
2058 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2059 * @retval VINF_SUCCESS Success.
2060 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2061 * status code must be passed on to EM.
2062 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2063 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2064 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2065 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2066 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2067 *
2068 * @param pVM The virtual machine.
2069 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2070 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2071 * @param uPort IO Port
2072 * @param uPrefix IO instruction prefix
2073 * @param enmAddrMode The address mode.
2074 * @param cbTransfer Size of transfer unit
2075 */
2076VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2077 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2078{
2079 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2080
2081 /*
2082 * We do not support REPNE or a decrementing destination pointer (DF set).
2083 * Segment prefixes are deliberately ignored, as per the instruction specification.
2084 */
2085 if ( (uPrefix & DISPREFIX_REPNE)
2086 || pRegFrame->eflags.Bits.u1DF)
2087 return VINF_EM_RAW_EMULATE_INSTR;
2088
2089 /*
2090 * Get bytes/words/dwords count to transfer.
2091 */
2092 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2093 RTGCUINTREG cTransfers = 1;
2094 if (uPrefix & DISPREFIX_REP)
2095 {
2096#ifndef IN_RC
2097 if ( CPUMIsGuestIn64BitCode(pVCpu)
2098 && pRegFrame->rcx >= _4G)
2099 return VINF_EM_RAW_EMULATE_INSTR;
2100#endif
2101 cTransfers = pRegFrame->rcx & fAddrMask;
2102 if (!cTransfers)
2103 return VINF_SUCCESS;
2104 }
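 /* Worked example of the address-size masking used throughout this function
 (fAddrMask comes from iomDisModeToMask above): with 16-bit addressing
 fAddrMask is 0xffff, so rcx=0x00010005 yields cTransfers=5, and the
 (new & fAddrMask) | (old & ~fAddrMask) updates below let only cx/di wrap
 while the upper register bits are preserved. */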
2105
2106 /* Convert destination address es:edi. */
2107 RTGCPTR GCPtrDst;
2108 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2109 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2110 &GCPtrDst);
2111 if (RT_FAILURE(rc2))
2112 {
2113 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2114 return VINF_EM_RAW_EMULATE_INSTR;
2115 }
2116
2117 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2118 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2119 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2120 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2121 if (rc2 != VINF_SUCCESS)
2122 {
2123 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2124 return VINF_EM_RAW_EMULATE_INSTR;
2125 }
2126
2127 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2128 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2129 if (cTransfers > 1)
2130 {
2131 /* If the device supports string transfers, ask it to do as
2132 * much as it wants. The rest is done with single-word transfers. */
2133 const RTGCUINTREG cTransfersOrg = cTransfers;
2134 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, &GCPtrDst, &cTransfers, cbTransfer);
2135 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2136 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2137 | (pRegFrame->rdi & ~fAddrMask);
2138 }
2139
2140#ifdef IN_RC
2141 MMGCRamRegisterTrapHandler(pVM);
2142#endif
2143 while (cTransfers && rcStrict == VINF_SUCCESS)
2144 {
2145 uint32_t u32Value;
2146 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2147 if (!IOM_SUCCESS(rcStrict))
2148 break;
2149 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2150 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2151 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2152 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2153 | (pRegFrame->rdi & ~fAddrMask);
2154 cTransfers--;
2155 }
2156#ifdef IN_RC
2157 MMGCRamDeregisterTrapHandler(pVM);
2158#endif
2159
2160 /* Update rcx on exit. */
2161 if (uPrefix & DISPREFIX_REP)
2162 pRegFrame->rcx = (cTransfers & fAddrMask)
2163 | (pRegFrame->rcx & ~fAddrMask);
2164
2165 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2166 return rcStrict;
2167}
2168
2169
2170/**
2171 * [REP*] INSB/INSW/INSD
2172 * ES:EDI,DX[,ECX]
2173 *
2174 * @returns Strict VBox status code. Informational status codes other than the one documented
2175 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2176 * @retval VINF_SUCCESS Success.
2177 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2178 * status code must be passed on to EM.
2179 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2180 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2181 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2182 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2183 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2184 *
2185 * @param pVM The virtual machine.
2186 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2187 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2188 * @param pCpu Disassembler CPU state.
2189 */
2190VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2191{
2192 /*
2193 * Get port number directly from the register (no need to bother the
2194 * disassembler). And get the I/O register size from the opcode / prefix.
2195 */
2196 RTIOPORT Port = pRegFrame->edx & 0xffff;
2197 unsigned cb = 0;
2198 if (pCpu->pCurInstr->uOpcode == OP_INSB)
2199 cb = 1;
2200 else
2201 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
2202
2203 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2204 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2205 {
2206 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2207 return rcStrict;
2208 }
2209
2210 return IOMInterpretINSEx(pVM, pVCpu, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2211}
2212
2213
2214/**
2215 * [REP*] OUTSB/OUTSW/OUTSD
2216 * DS:ESI,DX[,ECX]
2217 *
2218 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2219 *
2220 * @returns Strict VBox status code. Informational status codes other than the one documented
2221 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2222 * @retval VINF_SUCCESS Success.
2223 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2224 * status code must be passed on to EM.
2225 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2226 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2227 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2228 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2229 *
2230 * @param pVM The virtual machine.
2231 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2232 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2233 * @param uPort IO Port
2234 * @param uPrefix IO instruction prefix
2235 * @param enmAddrMode The address mode.
2236 * @param cbTransfer Size of transfer unit
2237 */
2238VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2239 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2240{
2241 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2242
2243 /*
2244 * We do not support segment prefixes, REPNE or
2245 * decrementing source pointer.
2246 */
2247 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2248 || pRegFrame->eflags.Bits.u1DF)
2249 return VINF_EM_RAW_EMULATE_INSTR;
2250
2251 /*
2252 * Get bytes/words/dwords count to transfer.
2253 */
2254 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2255 RTGCUINTREG cTransfers = 1;
2256 if (uPrefix & DISPREFIX_REP)
2257 {
2258#ifndef IN_RC
2259 if ( CPUMIsGuestIn64BitCode(pVCpu)
2260 && pRegFrame->rcx >= _4G)
2261 return VINF_EM_RAW_EMULATE_INSTR;
2262#endif
2263 cTransfers = pRegFrame->rcx & fAddrMask;
2264 if (!cTransfers)
2265 return VINF_SUCCESS;
2266 }
2267
2268 /* Convert source address ds:esi. */
2269 RTGCPTR GCPtrSrc;
2270 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2271 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2272 &GCPtrSrc);
2273 if (RT_FAILURE(rc2))
2274 {
2275 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2276 return VINF_EM_RAW_EMULATE_INSTR;
2277 }
2278
2279 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2280 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2281 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2282 (cpl == 3) ? X86_PTE_US : 0);
2283 if (rc2 != VINF_SUCCESS)
2284 {
2285 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2286 return VINF_EM_RAW_EMULATE_INSTR;
2287 }
2288
2289 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2290 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2291 if (cTransfers > 1)
2292 {
2293 /*
2294 * If the device supports string transfers, ask it to do as
2295 * much as it wants. The rest is done with single-word transfers.
2296 */
2297 const RTGCUINTREG cTransfersOrg = cTransfers;
2298 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
2299 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2300 pRegFrame->rsi = ((pRegFrame->rsi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2301 | (pRegFrame->rsi & ~fAddrMask);
2302 }
2303
2304#ifdef IN_RC
2305 MMGCRamRegisterTrapHandler(pVM);
2306#endif
2307
2308 while (cTransfers && rcStrict == VINF_SUCCESS)
2309 {
2310 uint32_t u32Value = 0;
2311 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2312 if (rcStrict != VINF_SUCCESS)
2313 break;
2314 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2315 if (!IOM_SUCCESS(rcStrict))
2316 break;
2317 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
2318 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2319 | (pRegFrame->rsi & ~fAddrMask);
2320 cTransfers--;
2321 }
2322
2323#ifdef IN_RC
2324 MMGCRamDeregisterTrapHandler(pVM);
2325#endif
2326
2327 /* Update rcx on exit. */
2328 if (uPrefix & DISPREFIX_REP)
2329 pRegFrame->rcx = (cTransfers & fAddrMask)
2330 | (pRegFrame->rcx & ~fAddrMask);
2331
2332 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2333 return rcStrict;
2334}
2335
2336
2337/**
2338 * [REP*] OUTSB/OUTSW/OUTSD
2339 * DS:ESI,DX[,ECX]
2340 *
2341 * @returns Strict VBox status code. Informational status codes other than the one documented
2342 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2343 * @retval VINF_SUCCESS Success.
2344 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2345 * status code must be passed on to EM.
2346 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2347 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
2348 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2349 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2350 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2351 *
2352 * @param pVM The virtual machine.
2353 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2354 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2355 * @param pCpu Disassembler CPU state.
2356 */
2357VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2358{
2359 /*
2360 * Get port number from the first parameter.
2361 * And get the I/O register size from the opcode / prefix.
2362 */
2363 uint64_t Port = 0;
2364 unsigned cb = 0;
2365 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &Port, &cb);
2366 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
2367 if (pCpu->pCurInstr->uOpcode == OP_OUTSB)
2368 cb = 1;
2369 else
2370 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
2371
2372 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2373 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2374 {
2375 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2376 return rcStrict;
2377 }
2378
2379 return IOMInterpretOUTSEx(pVM, pVCpu, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2380}
2381
2382#ifndef IN_RC
2383
2384/**
2385 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2386 *
2387 * (This is a special optimization used by the VGA device.)
2388 *
2389 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2390 * remapping is made.
2391 *
2392 * @param pVM The virtual machine.
2393 * @param GCPhys The address of the MMIO page to be changed.
2394 * @param GCPhysRemapped The address of the MMIO2 page.
2395 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2396 * for the time being.
2397 */
2398VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2399{
2400 /* Currently only called from the VGA device during MMIO. */
2401 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2402 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2403 PVMCPU pVCpu = VMMGetCpu(pVM);
2404
2405 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2406 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2407 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2408 && !HMIsNestedPagingActive(pVM)))
2409 return VINF_SUCCESS; /* ignore */
2410
2411 int rc = IOM_LOCK(pVM);
2412 if (RT_FAILURE(rc))
2413 return VINF_SUCCESS; /* better luck the next time around */
2414
2415 /*
2416 * Lookup the context range node the page belongs to.
2417 */
2418 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2419 AssertMsgReturnStmt(pRange,
2420 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), IOM_UNLOCK(pVM), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2421
2422 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2423 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2424
2425 /*
2426 * Do the aliasing; page align the addresses since PGM is picky.
2427 */
2428 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2429 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2430
2431 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2432
2433 IOM_UNLOCK(pVM);
2434 AssertRCReturn(rc, rc);
2435
2436 /*
2437 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2438 * can simply prefetch it.
2439 *
2440 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2441 */
2442#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2443# ifdef VBOX_STRICT
2444 uint64_t fFlags;
2445 RTHCPHYS HCPhys;
2446 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2447 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2448# endif
2449#endif
2450 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2451 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2452 return VINF_SUCCESS;
2453}
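/*
 * Usage sketch (illustration only; GCPhysVRamPage is an assumption, not VGA
 * code): a device remaps the MMIO page the guest just faulted on so that
 * subsequent accesses go directly to backing RAM.
 *
 *     rc = IOMMMIOMapMMIO2Page(pVM, GCPhysFault, GCPhysVRamPage, X86_PTE_RW | X86_PTE_P);
 *     AssertRC(rc);   (VINF_SUCCESS is returned even when no remapping was done)
 */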
2454
2455
2456/**
2457 * Mapping a HC page in place of an MMIO page for direct access.
2458 *
2459 * (This is a special optimization used by the APIC in the VT-x case.)
2460 *
2461 * @returns VBox status code.
2462 *
2463 * @param pVM Pointer to the VM.
2464 * @param pVCpu Pointer to the VMCPU.
2465 * @param GCPhys The address of the MMIO page to be changed.
2466 * @param HCPhys The address of the host physical page.
2467 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2468 * for the time being.
2469 */
2470VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2471{
2472 /* Currently only called from VT-x code during a page fault. */
2473 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2474
2475 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2476 Assert(HMIsEnabled(pVM));
2477
2478 /*
2479 * Lookup the context range node the page belongs to.
2480 */
2481#ifdef VBOX_STRICT
2482 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2483 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2484 AssertMsgReturn(pRange,
2485 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2486 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2487 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2488#endif
2489
2490 /*
2491 * Do the aliasing; page align the addresses since PGM is picky.
2492 */
2493 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2494 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2495
2496 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2497 AssertRCReturn(rc, rc);
2498
2499 /*
2500 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2501 * can simply prefetch it.
2502 *
2503 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2504 */
2505 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2506 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2507 return VINF_SUCCESS;
2508}
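/*
 * Usage sketch (illustration only; GCPhysApicBase and HCPhysAccessPage are
 * hypothetical names): the VT-x code aliases the guest APIC page to a host
 * physical page during a page fault exit.
 *
 *     rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, HCPhysAccessPage, X86_PTE_RW | X86_PTE_P);
 *     AssertRCReturn(rc, rc);
 */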
2509
2510
2511/**
2512 * Reset a previously modified MMIO region; restore the access flags.
2513 *
2514 * @returns VBox status code.
2515 *
2516 * @param pVM The virtual machine.
2517 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2518 */
2519VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2520{
2521 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2522
2523 PVMCPU pVCpu = VMMGetCpu(pVM);
2524
2525 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2526 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2527 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2528 && !HMIsNestedPagingActive(pVM)))
2529 return VINF_SUCCESS; /* ignore */
2530
2531 /*
2532 * Lookup the context range node the page belongs to.
2533 */
2534#ifdef VBOX_STRICT
2535 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2536 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2537 AssertMsgReturn(pRange,
2538 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2539 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2540 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2541#endif
2542
2543 /*
2544 * Call PGM to do the work.
2545 *
2546 * After the call, all the pages should be non-present... unless there is
2547 * a page pool flush pending (unlikely).
2548 */
2549 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2550 AssertRC(rc);
2551
2552#ifdef VBOX_STRICT
2553 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2554 {
2555 uint32_t cb = pRange->cb;
2556 GCPhys = pRange->GCPhys;
2557 while (cb)
2558 {
2559 uint64_t fFlags;
2560 RTHCPHYS HCPhys;
2561 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2562 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2563 cb -= PAGE_SIZE;
2564 GCPhys += PAGE_SIZE;
2565 }
2566 }
2567#endif
2568 return rc;
2569}
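/*
 * Usage sketch (illustration only; GCPhysMmioBase is an assumption): a device
 * that aliased pages with IOMMMIOMapMMIO2Page() undoes the whole mapping in
 * one call; any address inside the region identifies it.
 *
 *     rc = IOMMMIOResetRegion(pVM, GCPhysMmioBase);
 *     AssertRC(rc);
 */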
2570
2571#endif /* !IN_RC */
2572