VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@62643

Last change on this file since 62643 was 62601, checked in by vboxsync, 8 years ago

VMM: Unused parameters.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 46.0 KB
/* $Id: IOMAllMMIO.cpp 62601 2016-07-27 15:46:22Z vboxsync $ */
/** @file
 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM
#include <VBox/vmm/iom.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include "IOMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/hm.h>
#include "IOMInline.h"

#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>



#ifndef IN_RING3
/**
 * Defers a pending MMIO write to ring-3.
 *
 * @returns VINF_IOM_R3_MMIO_COMMIT_WRITE
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys      The write address.
 * @param   pvBuf       The bytes being written.
 * @param   cbBuf       How many bytes.
 * @param   pRange      The range, if resolved.
 */
static VBOXSTRICTRC iomMmioRing3WritePending(PVMCPU pVCpu, RTGCPHYS GCPhys, void const *pvBuf, size_t cbBuf, PIOMMMIORANGE pRange)
{
    Log5(("iomMmioRing3WritePending: %RGp LB %#x\n", GCPhys, cbBuf));
    AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
    pVCpu->iom.s.PendingMmioWrite.GCPhys  = GCPhys;
    AssertReturn(cbBuf <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
    pVCpu->iom.s.PendingMmioWrite.cbValue = (uint32_t)cbBuf;
    memcpy(pVCpu->iom.s.PendingMmioWrite.abValue, pvBuf, cbBuf);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
    RT_NOREF_PV(pRange);
    return VINF_IOM_R3_MMIO_COMMIT_WRITE;
}
#endif
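
/*
 * Illustrative sketch (not part of the original file, hence the #if 0): the
 * deferred-write record filled in by iomMmioRing3WritePending above, restated
 * with plain C99 types.  The struct layout, the names and the 64-byte
 * capacity are assumptions for illustration; the real record is
 * iom.s.PendingMmioWrite defined in IOMInternal.h.
 */
#if 0
# include <stdint.h>
# include <string.h>

typedef struct EXAMPLEPENDINGWRITE
{
    uint64_t GCPhys;        /* guest-physical address the deferred write targets */
    uint32_t cbValue;       /* zero means no write is pending */
    uint8_t  abValue[64];   /* bytes to commit once we are back in ring-3 */
} EXAMPLEPENDINGWRITE;

static int examplePendWrite(EXAMPLEPENDINGWRITE *pPending, uint64_t GCPhys, const void *pvBuf, size_t cbBuf)
{
    if (pPending->cbValue != 0 || cbBuf > sizeof(pPending->abValue))
        return -1;                      /* corresponds to the two AssertReturn IPE checks above */
    pPending->GCPhys  = GCPhys;
    pPending->cbValue = (uint32_t)cbBuf;
    memcpy(pPending->abValue, pvBuf, cbBuf);
    return 0;                           /* the real code then raises VMCPU_FF_IOM and returns
                                           VINF_IOM_R3_MMIO_COMMIT_WRITE so the write is committed in ring-3 */
}
#endif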


/**
 * Deals with complicated MMIO writes.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_READ may be returned.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRange      The range to write to.
 * @param   GCPhys      The physical address to start writing.
 * @param   pvValue     The value to write.
 * @param   cbValue     The size of the value to write.
 */
static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                              void const *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
                 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart  = GCPhys; NOREF(GCPhysStart);
    bool const     fReadMissing =    (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
                                  || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
    {
# ifdef IN_RING3
        LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
                R3STRING(pRange->pszDesc)));
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
#endif

    /*
     * Check if we should ignore the write.
     */
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
    {
        Assert(cbValue != 4 || (GCPhys & 3));
        return VINF_SUCCESS;
    }
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
    {
        Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
        return VINF_SUCCESS;
    }

    /*
     * Split and conquer.
     */
    for (;;)
    {
        unsigned const offAccess  = GCPhys & 3;
        unsigned       cbThisPart = 4 - offAccess;
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        /*
         * Get the missing bits (if any).
         */
        uint32_t u32MissingValue = 0;
        if (fReadMissing && cbThisPart != 4)
        {
            int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                        GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
            switch (rc2)
            {
                case VINF_SUCCESS:
                    break;
                case VINF_IOM_MMIO_UNUSED_FF:
                    u32MissingValue = UINT32_C(0xffffffff);
                    break;
                case VINF_IOM_MMIO_UNUSED_00:
                    u32MissingValue = 0;
                    break;
#ifndef IN_RING3
                case VINF_IOM_R3_MMIO_READ:
                case VINF_IOM_R3_MMIO_READ_WRITE:
                case VINF_IOM_R3_MMIO_WRITE:
                    LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    rc2 = VBOXSTRICTRC_TODO(iomMmioRing3WritePending(pVCpu, GCPhys, pvValue, cbValue, pRange));
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    return rc;
#endif
                default:
                    if (RT_FAILURE(rc2))
                    {
                        Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                        return rc2;
                    }
                    AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    break;
            }
        }

        /*
         * Merge missing and given bits.
         */
        uint32_t u32GivenMask;
        uint32_t u32GivenValue;
        switch (cbThisPart)
        {
            case 1:
                u32GivenValue = *(uint8_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x000000ff);
                break;
            case 2:
                u32GivenValue = *(uint16_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x0000ffff);
                break;
            case 3:
                u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
                                                    ((uint8_t const *)pvValue)[2], 0);
                u32GivenMask  = UINT32_C(0x00ffffff);
                break;
            case 4:
                u32GivenValue = *(uint32_t const *)pvValue;
                u32GivenMask  = UINT32_C(0xffffffff);
                break;
            default:
                AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
        }
        if (offAccess)
        {
            u32GivenValue <<= offAccess * 8;
            u32GivenMask  <<= offAccess * 8;
        }

        uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
                          | (u32GivenValue & u32GivenMask);

        /*
         * Do DWORD write to the device.
         */
        int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                     GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
#ifndef IN_RING3
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                Log3(("iomMMIODoComplicatedWrite: deferring GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
                AssertReturn(cbValue + (GCPhys & 3) <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
                pVCpu->iom.s.PendingMmioWrite.GCPhys  = GCPhys & ~(RTGCPHYS)3;
                pVCpu->iom.s.PendingMmioWrite.cbValue = cbValue + (GCPhys & 3);
                *(uint32_t *)pVCpu->iom.s.PendingMmioWrite.abValue = u32Value;
                if (cbValue > cbThisPart)
                    memcpy(&pVCpu->iom.s.PendingMmioWrite.abValue[4],
                           (uint8_t const *)pvValue + cbThisPart, cbValue - cbThisPart);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
                if (rc == VINF_SUCCESS)
                    rc = VINF_IOM_R3_MMIO_COMMIT_WRITE;
                return rc2;
#endif
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t const *)pvValue + cbThisPart;
    }

    return rc;
}
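
/*
 * Illustrative sketch (not part of the original file, hence the #if 0): one
 * iteration of the read-merge-write step above as a standalone helper, using
 * plain C99 types in place of the VBox typedefs.  The helper name and the
 * readDword/writeDword callbacks are hypothetical stand-ins for the range's
 * pfnReadCallback/pfnWriteCallback; a little-endian host and
 * offAccess + cbValue <= 4 (a single dword) are assumed.
 */
#if 0
# include <stdint.h>
# include <string.h>

static void exampleMergedDwordWrite(uint64_t GCPhys, const void *pvValue, unsigned cbValue,
                                    uint32_t (*readDword)(uint64_t), void (*writeDword)(uint64_t, uint32_t))
{
    unsigned const offAccess = GCPhys & 3;              /* byte offset within the containing dword */
    uint32_t u32 = readDword(GCPhys & ~(uint64_t)3);    /* fetch the bits we are not overwriting */

    uint32_t u32GivenValue = 0;
    memcpy(&u32GivenValue, pvValue, cbValue);           /* gather the 1-4 bytes being written */
    uint32_t u32GivenMask = cbValue >= 4 ? UINT32_C(0xffffffff) : (UINT32_C(1) << (cbValue * 8)) - 1;

    u32GivenValue <<= offAccess * 8;                    /* slide both into position */
    u32GivenMask  <<= offAccess * 8;

    u32 = (u32 & ~u32GivenMask) | (u32GivenValue & u32GivenMask);
    writeDword(GCPhys & ~(uint64_t)3, u32);             /* single aligned dword write to the device */
}
#endif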




/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
 */
static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
                                   const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
    {
        if (   (cb == 4 && !(GCPhysFault & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
            rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                          GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
        else
            rcStrict = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhysFault, pvData, cb);
    }
    else
        rcStrict = VINF_SUCCESS;

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}
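
/*
 * Illustrative sketch (not part of the original file, hence the #if 0): the
 * fast-path test in iomMMIODoWrite above, restated as a standalone predicate.
 * The function name and boolean parameters are hypothetical; the real checks
 * use IOMMMIO_FLAGS_WRITE_PASSTHRU and IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD
 * from iom.h.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

static bool exampleIsSimpleMmioWrite(uint64_t GCPhys, unsigned cb, bool fPassthru, bool fQwordOk)
{
    return (cb == 4 && (GCPhys & 3) == 0)               /* aligned dword: always goes straight through */
        || fPassthru                                    /* pass-through mode: the device copes with anything */
        || (cb == 8 && (GCPhys & 7) == 0 && fQwordOk);  /* aligned qword, if the write mode allows it */
}
#endif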


/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pRange      The range to read from.
 * @param   GCPhys      The physical address to start reading.
 * @param   pvValue     Where to store the value.
 * @param   cbValue     The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something?  Since reads can have side effects we
                 *        could be kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        u32Value >>= (GCPhys & 3) * 8;

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}
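
/*
 * Illustrative sketch (not part of the original file, hence the #if 0): one
 * iteration of the read loop above as a standalone helper with plain C99
 * types.  The helper name and the readDword callback are hypothetical
 * stand-ins for the range's pfnReadCallback; a little-endian host is assumed.
 */
#if 0
# include <stdint.h>
# include <string.h>

static unsigned exampleDwordReadPart(uint64_t GCPhys, void *pvValue, unsigned cbValue,
                                     uint32_t (*readDword)(uint64_t))
{
    uint32_t u32Value = readDword(GCPhys & ~(uint64_t)3);   /* one aligned dword read from the device */
    u32Value >>= (GCPhys & 3) * 8;                          /* drop the bytes below the requested start */

    unsigned cbThisPart = 4 - (GCPhys & 3);                 /* at most up to the end of this dword */
    if (cbThisPart > cbValue)
        cbThisPart = cbValue;
    memcpy(pvValue, &u32Value, cbThisPart);                 /* store the low cbThisPart bytes */
    return cbThisPart;                                      /* caller advances GCPhys/pvValue by this much */
}
#endif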


/**
 * Implements VINF_IOM_MMIO_UNUSED_FF.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the 0xff bytes.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t  *)pvValue = UINT8_C(0xff); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0xff);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_00.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the zeros.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t  *)pvValue = UINT8_C(0x00); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0x00);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                       void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (   cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (   cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                         pvValue, cbValue);
        else
            rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rcStrict = VINF_IOM_MMIO_UNUSED_FF;
    if (rcStrict != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rcStrict))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}


/**
 * Internal - statistics only.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    switch (cb)
    {
        case 1:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
            break;
        case 2:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
            break;
        case 4:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
            break;
        case 8:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
            break;
        default:
            /* No way. */
            AssertMsgFailed(("Invalid data length %d\n", cb));
            break;
    }
#else
    NOREF(pVM); NOREF(cb);
#endif
}



/**
 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uErrorCode  CPU Error code.  This is UINT32_MAX when we don't have
 *                      any error code (the EPT misconfig hack).
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
                                           RTGCPHYS GCPhysFault, void *pvUser)
{
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else
    /*
     * Locate the statistics.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        return VINF_IOM_R3_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case,
     * so do the simple test first and then try to deal with uErrorCode being
     * N/A.
     */
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * Enter the device's critical section (the range was retained above).
     */
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

    /*
     * Let IEM call us back via iomMmioHandler.
     */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    NOREF(pCtxCore); NOREF(GCPhysFault);
    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    iomMmioReleaseRange(pVM, pRange);
    if (RT_SUCCESS(rcStrict))
        return rcStrict;
    if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
        Log(("IOM: Hit unsupported IEM feature!\n"));
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;
    }
    return rcStrict;
}
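
/*
 * Illustrative sketch (not part of the original file, hence the #if 0): the
 * ring-3 deferral test above, unfolded into a standalone predicate.  The
 * helper name and the boolean parameters standing in for the four callback
 * pointers are hypothetical; uErr == UINT32_MAX marks the EPT misconfig case
 * where the access direction is unknown.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

static bool exampleShouldDeferPfToRing3(uint32_t uErr, bool fWriteHere, bool fReadHere,
                                        bool fWriteR3, bool fReadR3)
{
    if (fWriteHere && fReadHere)        /* both callbacks usable in this context: handle it here */
        return false;
    if (uErr == UINT32_MAX)             /* direction unknown: defer if ring-3 can do anything more */
        return fWriteR3 || fReadR3;
    if (uErr & UINT32_C(2))             /* X86_TRAP_PF_RW (bit 1): the access is a write */
        return !fWriteHere && fWriteR3;
    return !fReadHere && fReadR3;       /* the access is a read */
}
#endif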


/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      \#PF access handler callback for MMIO pages.}
 *
 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
 */
DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
                                          RTGCPHYS GCPhysFault, void *pvUser)
{
    LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
             GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); NOREF(pvFault);
    return iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
}


/**
 * Physical access handler for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
{
    /*
     * We don't have a range here, so look it up before calling the common function.
     */
    int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
#ifndef IN_RING3
    if (rc2 == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
    if (RT_UNLIKELY(!pRange))
    {
        IOM_UNLOCK_SHARED(pVM);
        return VERR_IOM_MMIO_RANGE_NOT_FOUND;
    }
    iomMmioRetainRange(pRange);
    IOM_UNLOCK_SHARED(pVM);

    VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);

    iomMmioReleaseRange(pVM, pRange);
    return VBOXSTRICTRC_VAL(rcStrict);
}


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
 *
 * @remarks The @a pvUser argument points to the MMIO range entry.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
                                              size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);

    NOREF(pvPhys); NOREF(enmOrigin);
    AssertPtr(pRange);
    AssertMsg(cbBuf >= 1, ("%zu\n", cbBuf));


#ifndef IN_RING3
    /*
     * If someone is doing FXSAVE, FXRSTOR, XSAVE, XRSTOR or other stuff dealing with
     * large amounts of data, just go to ring-3 where we don't need to deal with partial
     * successes.  No chance any of these will be problematic read-modify-write stuff.
     */
    if (cbBuf > sizeof(pVCpu->iom.s.PendingMmioWrite.abValue))
        return enmAccessType == PGMACCESSTYPE_WRITE ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
#endif

    /*
     * Validate the range.
     */
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
    {
        if (enmAccessType == PGMACCESSTYPE_READ)
            return VINF_IOM_R3_MMIO_READ;
        Assert(enmAccessType == PGMACCESSTYPE_WRITE);
        return iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, NULL /*pRange*/);
    }
#endif
    AssertRC(rc);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));

    /*
     * Perform locking.
     */
    iomMmioRetainRange(pRange);
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    IOM_UNLOCK_SHARED(pVM);
    VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rcStrict == VINF_SUCCESS)
    {
        /*
         * Perform the access.
         */
        if (enmAccessType == PGMACCESSTYPE_READ)
            rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
        else
        {
            rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
#ifndef IN_RING3
            if (rcStrict == VINF_IOM_R3_MMIO_WRITE)
                rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
#endif
        }

        /* Check the return code. */
#ifdef IN_RING3
        AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
#else
        AssertMsg(   rcStrict == VINF_SUCCESS
                  || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
                  || (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE && enmAccessType == PGMACCESSTYPE_WRITE)
                  || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
                  || rcStrict == VINF_EM_DBG_STOP
                  || rcStrict == VINF_EM_DBG_EVENT
                  || rcStrict == VINF_EM_DBG_BREAKPOINT
                  || rcStrict == VINF_EM_OFF
                  || rcStrict == VINF_EM_SUSPEND
                  || rcStrict == VINF_EM_RESET
                  || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
                  //|| rcStrict == VINF_EM_HALT       /* ?? */
                  //|| rcStrict == VINF_EM_NO_MEMORY  /* ?? */
                  , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
#endif

        iomMmioReleaseRange(pVM, pRange);
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    }
#ifdef IN_RING3
    else
        iomMmioReleaseRange(pVM, pRange);
#else
    else
    {
        if (rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
        {
            if (enmAccessType == PGMACCESSTYPE_READ)
                rcStrict = VINF_IOM_R3_MMIO_READ;
            else
            {
                Assert(enmAccessType == PGMACCESSTYPE_WRITE);
                rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
            }
        }
        iomMmioReleaseRange(pVM, pRange);
    }
#endif
    return rcStrict;
}


#ifdef IN_RING3 /* Only used by REM. */

/**
 * Reads an MMIO register.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys      The physical address to read.
 * @param   pu32Value   Where to store the value read.
 * @param   cbValue     The size of the register to read in bytes. 1, 2 or 4 bytes.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
{
    Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
    /* Take the IOM lock before performing any MMIO. */
    VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
#endif
    AssertRC(VBOXSTRICTRC_VAL(rc));
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    IEMNotifyMMIORead(pVM, GCPhys, cbValue);
#endif

    /*
     * Lookup the current context range node and statistics.
     */
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
    if (!pRange)
    {
        AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
        IOM_UNLOCK_SHARED(pVM);
        return VERR_IOM_MMIO_RANGE_NOT_FOUND;
    }
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else  /* VBOX_WITH_STATISTICS */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
    STAM_COUNTER_INC(&pStats->Accesses);
#endif /* VBOX_WITH_STATISTICS */

    if (pRange->CTX_SUFF(pfnReadCallback))
    {
        /*
         * Perform locking.
         */
        PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
        rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
        if (rc != VINF_SUCCESS)
        {
            iomMmioReleaseRange(pVM, pRange);
            return rc;
        }

        /*
         * Perform the read and deal with the result.
         */
        STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
        if (   (cbValue == 4 && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (cbValue == 8 && !(GCPhys & 7)) )
            rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                   pu32Value, (unsigned)cbValue);
        else
            rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
        STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
        switch (VBOXSTRICTRC_VAL(rc))
        {
            case VINF_SUCCESS:
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return rc;
#ifndef IN_RING3
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
#endif
            default:
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return rc;

            case VINF_IOM_MMIO_UNUSED_00:
                iomMMIODoRead00s(pu32Value, cbValue);
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return VINF_SUCCESS;

            case VINF_IOM_MMIO_UNUSED_FF:
                iomMMIODoReadFFs(pu32Value, cbValue);
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return VINF_SUCCESS;
        }
        /* not reached */
    }
#ifndef IN_RING3
    if (pRange->pfnReadCallbackR3)
    {
        STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ;
    }
#endif

    /*
     * Unassigned memory - this is actually not supposed to happen...
     */
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    iomMMIODoReadFFs(pu32Value, cbValue);
    Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
    iomMmioReleaseRange(pVM, pRange);
    return VINF_SUCCESS;
}


/**
 * Writes to an MMIO register.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys      The physical address to write to.
 * @param   u32Value    The value to write.
 * @param   cbValue     The size of the register to write in bytes. 1, 2 or 4 bytes.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
{
    Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
    /* Take the IOM lock before performing any MMIO. */
    VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
#endif
    AssertRC(VBOXSTRICTRC_VAL(rc));
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
#endif

    /*
     * Lookup the current context range node.
     */
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
    if (!pRange)
    {
        AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
        IOM_UNLOCK_SHARED(pVM);
        return VERR_IOM_MMIO_RANGE_NOT_FOUND;
    }
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else  /* VBOX_WITH_STATISTICS */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
    STAM_COUNTER_INC(&pStats->Accesses);
#endif /* VBOX_WITH_STATISTICS */

    if (pRange->CTX_SUFF(pfnWriteCallback))
    {
        /*
         * Perform locking.
         */
        PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
        rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
        if (rc != VINF_SUCCESS)
        {
            iomMmioReleaseRange(pVM, pRange);
            return rc;
        }

        /*
         * Perform the write.
         */
        STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
        if (   (cbValue == 4 && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cbValue == 8 && !(GCPhys & 7)) )
            rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys, &u32Value, (unsigned)cbValue);
        else
            rc = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhys, &u32Value, (unsigned)cbValue);
        STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
#ifndef IN_RING3
        if (   rc == VINF_IOM_R3_MMIO_WRITE
            || rc == VINF_IOM_R3_MMIO_READ_WRITE)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
#endif
        Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
        iomMmioReleaseRange(pVM, pRange);
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
        return rc;
    }
#ifndef IN_RING3
    if (pRange->pfnWriteCallbackR3)
    {
        STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_WRITE;
    }
#endif

    /*
     * No write handler, nothing to do.
     */
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
    iomMmioReleaseRange(pVM, pRange);
    return VINF_SUCCESS;
}

#endif /* IN_RING3 - only used by REM. */
#ifndef IN_RC

/**
 * Mapping an MMIO2 page in place of an MMIO page for direct access.
 *
 * (This is a special optimization used by the VGA device.)
 *
 * @returns VBox status code.  This API may return VINF_SUCCESS even if no
 *          remapping is made.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          The address of the MMIO page to be changed.
 * @param   GCPhysRemapped  The address of the MMIO2 page.
 * @param   fPageFlags      Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
 *                          for the time being.
 */
VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
{
# ifndef IEM_VERIFICATION_MODE_FULL
    /* Currently only called from the VGA device during MMIO. */
    Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    if (   !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
        || (   CPUMIsGuestInPagedProtectedMode(pVCpu)
            && !HMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;    /* ignore */

    int rc = IOM_LOCK_SHARED(pVM);
    if (RT_FAILURE(rc))
        return VINF_SUCCESS; /* better luck the next time around */

    /*
     * Lookup the context range node the page belongs to.
     */
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);

    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);

    /*
     * Do the aliasing; page align the addresses since PGM is picky.
     */
    GCPhys         &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;

    rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);

    IOM_UNLOCK_SHARED(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Modify the shadow page table.  Since it's an MMIO page it won't be present and we
     * can simply prefetch it.
     *
     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     */
# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
#  ifdef VBOX_STRICT
    uint64_t fFlags;
    RTHCPHYS HCPhys;
    rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
    Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
#  endif
# endif
    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
# endif /* !IEM_VERIFICATION_MODE_FULL */
    return VINF_SUCCESS;
}
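
/*
 * Illustrative sketch (not part of the original file, hence the #if 0): the
 * page-aligning step used by the aliasing code above, with 4 KiB pages
 * assumed (PAGE_OFFSET_MASK == 0xfff).  Hypothetical standalone helper with
 * plain C99 types.
 */
# if 0
#  include <stdint.h>

static uint64_t examplePageAlign(uint64_t GCPhys)
{
    return GCPhys & ~(uint64_t)0xfff;   /* drop the offset-in-page bits; PGM wants page-aligned input */
}
# endif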


# ifndef IEM_VERIFICATION_MODE_FULL
/**
 * Mapping a HC page in place of an MMIO page for direct access.
 *
 * (This is a special optimization used by the APIC in the VT-x case.)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhys      The address of the MMIO page to be changed.
 * @param   HCPhys      The address of the host physical page.
 * @param   fPageFlags  Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
 *                      for the time being.
 */
VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
{
    /* Currently only called from VT-x code during a page fault. */
    Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));

    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
    Assert(HMIsEnabled(pVM));

    /*
     * Lookup the context range node the page belongs to.
     */
#  ifdef VBOX_STRICT
    /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
    PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
#  endif

    /*
     * Do the aliasing; page align the addresses since PGM is picky.
     */
    GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;

    int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
    AssertRCReturn(rc, rc);

    /*
     * Modify the shadow page table.  Since it's an MMIO page it won't be present and we
     * can simply prefetch it.
     *
     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     */
    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    return VINF_SUCCESS;
}
# endif /* !IEM_VERIFICATION_MODE_FULL */


/**
 * Reset a previously modified MMIO region; restore the access flags.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Physical address that's part of the MMIO region to be reset.
 */
VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
{
    Log(("IOMMMIOResetRegion %RGp\n", GCPhys));

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    if (   !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
        || (   CPUMIsGuestInPagedProtectedMode(pVCpu)
            && !HMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;    /* ignore */

    /*
     * Lookup the context range node the page belongs to.
     */
# ifdef VBOX_STRICT
    /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
    PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
# endif

    /*
     * Call PGM to do the work.
     *
     * After the call, all the pages should be non-present... unless there is
     * a page pool flush pending (unlikely).
     */
    int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
    AssertRC(rc);

# ifdef VBOX_STRICT
    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    {
        uint32_t cb = pRange->cb;
        GCPhys = pRange->GCPhys;
        while (cb)
        {
            uint64_t fFlags;
            RTHCPHYS HCPhys;
            rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
            Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
            cb     -= PAGE_SIZE;
            GCPhys += PAGE_SIZE;
        }
    }
# endif
    return rc;
}

#endif /* !IN_RC */