VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@62653

/* $Id: IOMAllMMIO.cpp 62653 2016-07-28 22:11:57Z vboxsync $ */
/** @file
 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM
#include <VBox/vmm/iom.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include "IOMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/hm.h>
#include "IOMInline.h"

#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>



#ifndef IN_RING3
/**
 * Defers a pending MMIO write to ring-3.
 *
 * @returns VINF_IOM_R3_MMIO_COMMIT_WRITE
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys      The write address.
 * @param   pvBuf       The bytes being written.
 * @param   cbBuf       How many bytes.
 * @param   pRange      The range, if resolved.
 */
static VBOXSTRICTRC iomMmioRing3WritePending(PVMCPU pVCpu, RTGCPHYS GCPhys, void const *pvBuf, size_t cbBuf, PIOMMMIORANGE pRange)
{
    Log5(("iomMmioRing3WritePending: %RGp LB %#x\n", GCPhys, cbBuf));
    AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
    pVCpu->iom.s.PendingMmioWrite.GCPhys  = GCPhys;
    AssertReturn(cbBuf <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
    pVCpu->iom.s.PendingMmioWrite.cbValue = (uint32_t)cbBuf;
    memcpy(pVCpu->iom.s.PendingMmioWrite.abValue, pvBuf, cbBuf);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
    RT_NOREF_PV(pRange);
    return VINF_IOM_R3_MMIO_COMMIT_WRITE;
}
#endif
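/* Illustrative sketch (editorial addition, not part of the original source):
   a hypothetical ring-0 caller of the deferral helper above.  The bytes are
   stashed in the per-VCPU pending-write buffer and replayed from ring-3 once
   VMCPU_FF_IOM is acted upon. */
#if 0
    uint32_t     u32Val   = UINT32_C(0x12345678); /* hypothetical value */
    VBOXSTRICTRC rcStrict = iomMmioRing3WritePending(pVCpu, GCPhys, &u32Val, sizeof(u32Val), pRange);
    Assert(rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE); /* the only status it returns */
#endif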


/**
 * Deals with complicated MMIO writes.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_READ may be returned.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRange      The range to write to.
 * @param   GCPhys      The physical address to start writing.
 * @param   pvValue     Pointer to the value to write.
 * @param   cbValue     The size of the value to write.
 */
static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                              void const *pvValue, unsigned cbValue)
{
    RT_NOREF_PV(pVCpu);
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
                 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart  = GCPhys; NOREF(GCPhysStart);
    bool const     fReadMissing =    (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
                                  || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
    {
# ifdef IN_RING3
        LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
                R3STRING(pRange->pszDesc)));
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
#endif

    /*
     * Check if we should ignore the write.
     */
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
    {
        Assert(cbValue != 4 || (GCPhys & 3));
        return VINF_SUCCESS;
    }
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
    {
        Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
        return VINF_SUCCESS;
    }

    /*
     * Split and conquer.
     */
    for (;;)
    {
        unsigned const offAccess  = GCPhys & 3;
        unsigned       cbThisPart = 4 - offAccess;
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        /*
         * Get the missing bits (if any).
         */
        uint32_t u32MissingValue = 0;
        if (fReadMissing && cbThisPart != 4)
        {
            int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                        GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
            switch (rc2)
            {
                case VINF_SUCCESS:
                    break;
                case VINF_IOM_MMIO_UNUSED_FF:
                    u32MissingValue = UINT32_C(0xffffffff);
                    break;
                case VINF_IOM_MMIO_UNUSED_00:
                    u32MissingValue = 0;
                    break;
#ifndef IN_RING3
                case VINF_IOM_R3_MMIO_READ:
                case VINF_IOM_R3_MMIO_READ_WRITE:
                case VINF_IOM_R3_MMIO_WRITE:
                    LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    rc2 = VBOXSTRICTRC_TODO(iomMmioRing3WritePending(pVCpu, GCPhys, pvValue, cbValue, pRange));
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    return rc;
#endif
                default:
                    if (RT_FAILURE(rc2))
                    {
                        Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                        return rc2;
                    }
                    AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    break;
            }
        }

        /*
         * Merge missing and given bits.
         */
        uint32_t u32GivenMask;
        uint32_t u32GivenValue;
        switch (cbThisPart)
        {
            case 1:
                u32GivenValue = *(uint8_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x000000ff);
                break;
            case 2:
                u32GivenValue = *(uint16_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x0000ffff);
                break;
            case 3:
                u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
                                                    ((uint8_t const *)pvValue)[2], 0);
                u32GivenMask  = UINT32_C(0x00ffffff);
                break;
            case 4:
                u32GivenValue = *(uint32_t const *)pvValue;
                u32GivenMask  = UINT32_C(0xffffffff);
                break;
            default:
                AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
        }
        if (offAccess)
        {
            u32GivenValue <<= offAccess * 8;
            u32GivenMask  <<= offAccess * 8;
        }

        uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
                          | (u32GivenValue  &  u32GivenMask);

        /*
         * Do DWORD write to the device.
         */
        int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                     GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
#ifndef IN_RING3
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                Log3(("iomMMIODoComplicatedWrite: deferring GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
                AssertReturn(cbValue + (GCPhys & 3) <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
                pVCpu->iom.s.PendingMmioWrite.GCPhys  = GCPhys & ~(RTGCPHYS)3;
                pVCpu->iom.s.PendingMmioWrite.cbValue = cbValue + (GCPhys & 3);
                *(uint32_t *)pVCpu->iom.s.PendingMmioWrite.abValue = u32Value;
                if (cbValue > cbThisPart)
                    memcpy(&pVCpu->iom.s.PendingMmioWrite.abValue[4],
                           (uint8_t const *)pvValue + cbThisPart, cbValue - cbThisPart);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
                if (rc == VINF_SUCCESS)
                    rc = VINF_IOM_R3_MMIO_COMMIT_WRITE;
                return rc2;
#endif
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t const *)pvValue + cbThisPart;
    }

    return rc;
}
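/* Worked example (editorial addition, illustrative only): a 2-byte write of
   0xBEEF at GCPhys = base + 1 in a read-missing range:
     offAccess     = 1, cbThisPart = 2
     missing dword = 0x11223344          (hypothetical device read at base)
     u32GivenValue = 0x0000BEEF << 8  =  0x00BEEF00
     u32GivenMask  = 0x0000FFFF << 8  =  0x00FFFF00
     u32Value      = (0x11223344 & ~0x00FFFF00) | 0x00BEEF00 = 0x11BEEF44
   which is then written back to the device as one aligned dword at base. */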




/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
 */
static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
                                   const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
    {
        if (   (cb == 4 && !(GCPhysFault & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
            rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                          GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
        else
            rcStrict = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhysFault, pvData, cb);
    }
    else
        rcStrict = VINF_SUCCESS;

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}


/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pRange      The range to read from.
 * @param   GCPhys      The physical address to start reading.
 * @param   pvValue     Where to store the value.
 * @param   cbValue     The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something?  Since reads can have side effects, we
                 *        could be kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        u32Value >>= (GCPhys & 3) * 8;

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}
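/* Worked example (editorial addition, illustrative only): a 2-byte read at
   GCPhys = base + 3 spans two dwords:
     pass 1: dword read at base + 0 -> 0xAABBCCDD (hypothetical)
             u32Value >>= 3 * 8     -> 0x000000AA, store 1 byte (0xAA)
     pass 2: dword read at base + 4 -> 0x11223344 (hypothetical)
             u32Value >>= 0         -> store 1 byte (0x44)
   leaving the little-endian value 0x44AA in the caller's buffer. */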


/**
 * Implements VINF_IOM_MMIO_UNUSED_FF.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the 0xff bytes.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t *)pvValue  = UINT8_C(0xff); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0xff);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_00.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the zeros.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t *)pvValue  = UINT8_C(0x00); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0x00);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                       void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (   cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (   cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                         pvValue, cbValue);
        else
            rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rcStrict = VINF_IOM_MMIO_UNUSED_FF;
    if (rcStrict != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rcStrict))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}
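/* Summary (editorial addition, derived from the checks above): aligned dword
   reads, anything in a PASSTHRU range, and aligned qword reads in a
   DWORD_QWORD range go straight to the device callback; every other
   size/alignment combination is routed through iomMMIODoComplicatedRead,
   and a missing callback reads back as 0xff bytes. */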



/**
 * Internal - statistics only.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    switch (cb)
    {
        case 1:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
            break;
        case 2:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
            break;
        case 4:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
            break;
        case 8:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
            break;
        default:
            /* No way. */
            AssertMsgFailed(("Invalid data length %d\n", cb));
            break;
    }
#else
    NOREF(pVM); NOREF(cb);
#endif
}



/**
 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uErrorCode  CPU Error code.  This is UINT32_MAX when we don't have
 *                      any error code (the EPT misconfig hack).
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
                                           RTGCPHYS GCPhysFault, void *pvUser)
{
    RT_NOREF_PV(uErrorCode);
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else
    /*
     * Locate the statistics.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        return VINF_IOM_R3_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case,
     * so do the simple test first and then try to deal with uErrorCode being N/A.
     */
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * Retain the range and do locking.
     */
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

    /*
     * Let IEM call us back via iomMmioHandler.
     */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    NOREF(pCtxCore); NOREF(GCPhysFault);
    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    iomMmioReleaseRange(pVM, pRange);
    if (RT_SUCCESS(rcStrict))
        return rcStrict;
    if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
        Log(("IOM: Hit unsupported IEM feature!\n"));
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;
    }
    return rcStrict;
}


/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      \#PF access handler callback for MMIO pages.}
 *
 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
 */
DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
                                          RTGCPHYS GCPhysFault, void *pvUser)
{
    LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
             GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); NOREF(pvFault);
    return iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
}


/**
 * Physical access handler for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
{
    /*
     * We don't have a range here, so look it up before calling the common function.
     */
    int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
#ifndef IN_RING3
    if (rc2 == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
    if (RT_UNLIKELY(!pRange))
    {
        IOM_UNLOCK_SHARED(pVM);
        return VERR_IOM_MMIO_RANGE_NOT_FOUND;
    }
    iomMmioRetainRange(pRange);
    IOM_UNLOCK_SHARED(pVM);

    VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);

    iomMmioReleaseRange(pVM, pRange);
    return VBOXSTRICTRC_VAL(rcStrict);
}


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
 *
 * @remarks The @a pvUser argument points to the MMIO range entry.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
                                              size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);

    NOREF(pvPhys); NOREF(enmOrigin);
    AssertPtr(pRange);
    AssertMsg(cbBuf >= 1, ("%zu\n", cbBuf));


#ifndef IN_RING3
    /*
     * If someone is doing FXSAVE, FXRSTOR, XSAVE, XRSTOR or other stuff dealing with
     * large amounts of data, just go to ring-3 where we don't need to deal with partial
     * successes.  No chance any of these will be problematic read-modify-write stuff.
     */
    if (cbBuf > sizeof(pVCpu->iom.s.PendingMmioWrite.abValue))
        return enmAccessType == PGMACCESSTYPE_WRITE ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
#endif

    /*
     * Validate the range.
     */
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
    {
        if (enmAccessType == PGMACCESSTYPE_READ)
            return VINF_IOM_R3_MMIO_READ;
        Assert(enmAccessType == PGMACCESSTYPE_WRITE);
        return iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, NULL /*pRange*/);
    }
#endif
    AssertRC(rc);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));

    /*
     * Perform locking.
     */
    iomMmioRetainRange(pRange);
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    IOM_UNLOCK_SHARED(pVM);
    VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rcStrict == VINF_SUCCESS)
    {
        /*
         * Perform the access.
         */
        if (enmAccessType == PGMACCESSTYPE_READ)
            rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
        else
        {
            rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
#ifndef IN_RING3
            if (rcStrict == VINF_IOM_R3_MMIO_WRITE)
                rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
#endif
        }

        /* Check the return code. */
#ifdef IN_RING3
        AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
#else
        AssertMsg(   rcStrict == VINF_SUCCESS
                  || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
                  || (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE && enmAccessType == PGMACCESSTYPE_WRITE)
                  || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
                  || rcStrict == VINF_EM_DBG_STOP
                  || rcStrict == VINF_EM_DBG_EVENT
                  || rcStrict == VINF_EM_DBG_BREAKPOINT
                  || rcStrict == VINF_EM_OFF
                  || rcStrict == VINF_EM_SUSPEND
                  || rcStrict == VINF_EM_RESET
                  || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
                  //|| rcStrict == VINF_EM_HALT       /* ?? */
                  //|| rcStrict == VINF_EM_NO_MEMORY  /* ?? */
                  , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
#endif

        iomMmioReleaseRange(pVM, pRange);
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    }
#ifdef IN_RING3
    else
        iomMmioReleaseRange(pVM, pRange);
#else
    else
    {
        if (rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
        {
            if (enmAccessType == PGMACCESSTYPE_READ)
                rcStrict = VINF_IOM_R3_MMIO_READ;
            else
            {
                Assert(enmAccessType == PGMACCESSTYPE_WRITE);
                rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
            }
        }
        iomMmioReleaseRange(pVM, pRange);
    }
#endif
    return rcStrict;
}
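/* Illustrative flow (editorial addition, not part of the original source):
   in ring-0/raw-mode a write larger than the per-VCPU pending-write buffer
   is bounced to ring-3 up front (see the cbBuf check above), while a small
   write that the device defers is stashed and committed from ring-3:

     iomMmioHandler(..., cbBuf, PGMACCESSTYPE_WRITE, ...)
       cbBuf too large          -> VINF_IOM_R3_MMIO_WRITE (redo in ring-3)
       device defers the write  -> iomMmioRing3WritePending(...)
                                -> VINF_IOM_R3_MMIO_COMMIT_WRITE            */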


#ifdef IN_RING3 /* Only used by REM. */

/**
 * Reads a MMIO register.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys      The physical address to read.
 * @param   pu32Value   Where to store the value read.
 * @param   cbValue     The size of the register to read in bytes. 1, 2 or 4 bytes.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
{
    Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
    /* Take the IOM lock before performing any MMIO. */
    VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
#endif
    AssertRC(VBOXSTRICTRC_VAL(rc));
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    IEMNotifyMMIORead(pVM, GCPhys, cbValue);
#endif

    /*
     * Lookup the current context range node and statistics.
     */
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
    if (!pRange)
    {
        AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
        IOM_UNLOCK_SHARED(pVM);
        return VERR_IOM_MMIO_RANGE_NOT_FOUND;
    }
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else  /* VBOX_WITH_STATISTICS */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
    STAM_COUNTER_INC(&pStats->Accesses);
#endif /* VBOX_WITH_STATISTICS */

    if (pRange->CTX_SUFF(pfnReadCallback))
    {
        /*
         * Perform locking.
         */
        PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
        rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
        if (rc != VINF_SUCCESS)
        {
            iomMmioReleaseRange(pVM, pRange);
            return rc;
        }

        /*
         * Perform the read and deal with the result.
         */
        STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
        if (   (cbValue == 4 && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (cbValue == 8 && !(GCPhys & 7)) )
            rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                   pu32Value, (unsigned)cbValue);
        else
            rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
        STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
        switch (VBOXSTRICTRC_VAL(rc))
        {
            case VINF_SUCCESS:
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return rc;
#ifndef IN_RING3
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
#endif
            default:
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return rc;

            case VINF_IOM_MMIO_UNUSED_00:
                iomMMIODoRead00s(pu32Value, cbValue);
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return VINF_SUCCESS;

            case VINF_IOM_MMIO_UNUSED_FF:
                iomMMIODoReadFFs(pu32Value, cbValue);
                Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
                PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
                iomMmioReleaseRange(pVM, pRange);
                return VINF_SUCCESS;
        }
        /* not reached */
    }
#ifndef IN_RING3
    if (pRange->pfnReadCallbackR3)
    {
        STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ;
    }
#endif

    /*
     * Unassigned memory - this is actually not supposed to happen...
     */
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    iomMMIODoReadFFs(pu32Value, cbValue);
    Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
    iomMmioReleaseRange(pVM, pRange);
    return VINF_SUCCESS;
}
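/* Usage sketch (editorial addition, illustrative only): reading a 32-bit
   register; the address GCPhysMmioReg is hypothetical. */
#if 0
    uint32_t u32Reg;
    VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, GCPhysMmioReg /* hypothetical */, &u32Reg, sizeof(u32Reg));
    if (rcStrict == VINF_SUCCESS)
        Log(("register value: %#RX32\n", u32Reg));
#endif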


/**
 * Writes to a MMIO register.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys      The physical address to write to.
 * @param   u32Value    The value to write.
 * @param   cbValue     The size of the register to write in bytes. 1, 2 or 4 bytes.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
{
    Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
    /* Take the IOM lock before performing any MMIO. */
    VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
#endif
    AssertRC(VBOXSTRICTRC_VAL(rc));
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
    IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
#endif

    /*
     * Lookup the current context range node.
     */
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
    if (!pRange)
    {
        AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
        IOM_UNLOCK_SHARED(pVM);
        return VERR_IOM_MMIO_RANGE_NOT_FOUND;
    }
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else  /* VBOX_WITH_STATISTICS */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
    STAM_COUNTER_INC(&pStats->Accesses);
#endif /* VBOX_WITH_STATISTICS */

    if (pRange->CTX_SUFF(pfnWriteCallback))
    {
        /*
         * Perform locking.
         */
        PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
        rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
        if (rc != VINF_SUCCESS)
        {
            iomMmioReleaseRange(pVM, pRange);
            return rc;
        }

        /*
         * Perform the write.
         */
        STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
        if (   (cbValue == 4 && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cbValue == 8 && !(GCPhys & 7)) )
            rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys, &u32Value, (unsigned)cbValue);
        else
            rc = iomMMIODoComplicatedWrite(pVM, pVCpu, pRange, GCPhys, &u32Value, (unsigned)cbValue);
        STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
#ifndef IN_RING3
        if (   rc == VINF_IOM_R3_MMIO_WRITE
            || rc == VINF_IOM_R3_MMIO_READ_WRITE)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
#endif
        Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
        iomMmioReleaseRange(pVM, pRange);
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
        return rc;
    }
#ifndef IN_RING3
    if (pRange->pfnWriteCallbackR3)
    {
        STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_WRITE;
    }
#endif

    /*
     * No write handler, nothing to do.
     */
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
    iomMmioReleaseRange(pVM, pRange);
    return VINF_SUCCESS;
}
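/* Usage sketch (editorial addition, illustrative only): writing a 32-bit
   register; the address GCPhysMmioReg is hypothetical. */
#if 0
    VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, pVCpu, GCPhysMmioReg /* hypothetical */, UINT32_C(0xcafe0001), sizeof(uint32_t));
    Assert(rcStrict == VINF_SUCCESS); /* in ring-3 the write completes synchronously */
#endif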

#endif /* IN_RING3 - only used by REM. */
#ifndef IN_RC

/**
 * Mapping an MMIO2 page in place of an MMIO page for direct access.
 *
 * (This is a special optimization used by the VGA device.)
 *
 * @returns VBox status code.  This API may return VINF_SUCCESS even if no
 *          remapping is made.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          The address of the MMIO page to be changed.
 * @param   GCPhysRemapped  The address of the MMIO2 page.
 * @param   fPageFlags      Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
 *                          for the time being.
 */
VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
{
# ifndef IEM_VERIFICATION_MODE_FULL
    /* Currently only called from the VGA device during MMIO. */
    Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    if (   !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
        || (   CPUMIsGuestInPagedProtectedMode(pVCpu)
            && !HMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;    /* ignore */

    int rc = IOM_LOCK_SHARED(pVM);
    if (RT_FAILURE(rc))
        return VINF_SUCCESS; /* better luck the next time around */

    /*
     * Lookup the context range node the page belongs to.
     */
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);

    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);

    /*
     * Do the aliasing; page align the addresses since PGM is picky.
     */
    GCPhys         &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;

    rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);

    IOM_UNLOCK_SHARED(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Modify the shadow page table.  Since it's an MMIO page it won't be present and we
     * can simply prefetch it.
     *
     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     */
# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
#  ifdef VBOX_STRICT
    uint64_t fFlags;
    RTHCPHYS HCPhys;
    rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
    Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
#  endif
# endif
    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
# endif /* !IEM_VERIFICATION_MODE_FULL */
    return VINF_SUCCESS;
}
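/* Illustrative sketch (editorial addition, not part of the original source):
   how a device might alias a page for direct access and later undo it.
   Both addresses are hypothetical. */
#if 0
    int rc = IOMMMIOMapMMIO2Page(pVM, GCPhysMmioPage /* hypothetical */,
                                 GCPhysMmio2Backing /* hypothetical */,
                                 X86_PTE_RW | X86_PTE_P);
    /* ... direct guest access to the aliased page ... */
    rc = IOMMMIOResetRegion(pVM, GCPhysMmioPage); /* restore the access handlers */
#endif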


# ifndef IEM_VERIFICATION_MODE_FULL
/**
 * Mapping a HC page in place of an MMIO page for direct access.
 *
 * (This is a special optimization used by the APIC in the VT-x case.)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhys      The address of the MMIO page to be changed.
 * @param   HCPhys      The address of the host physical page.
 * @param   fPageFlags  Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
 *                      for the time being.
 */
VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
{
    /* Currently only called from VT-x code during a page fault. */
    Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));

    AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
    Assert(HMIsEnabled(pVM));

    /*
     * Lookup the context range node the page belongs to.
     */
# ifdef VBOX_STRICT
    /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
    PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
# endif

    /*
     * Do the aliasing; page align the addresses since PGM is picky.
     */
    GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;

    int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
    AssertRCReturn(rc, rc);

    /*
     * Modify the shadow page table.  Since it's an MMIO page it won't be present and we
     * can simply prefetch it.
     *
     * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
     */
    rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
    Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
    return VINF_SUCCESS;
}
# endif /* !IEM_VERIFICATION_MODE_FULL */


/**
 * Reset a previously modified MMIO region; restore the access flags.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Physical address that's part of the MMIO region to be reset.
 */
VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
{
    Log(("IOMMMIOResetRegion %RGp\n", GCPhys));

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /* This currently only works in real mode, protected mode without paging or with nested paging. */
    if (   !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
        || (   CPUMIsGuestInPagedProtectedMode(pVCpu)
            && !HMIsNestedPagingActive(pVM)))
        return VINF_SUCCESS;    /* ignore */

    /*
     * Lookup the context range node the page belongs to.
     */
# ifdef VBOX_STRICT
    /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
    PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
    AssertMsgReturn(pRange,
                    ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
    Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
    Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
# endif

    /*
     * Call PGM to do the job.
     *
     * After the call, all the pages should be non-present... unless there is
     * a page pool flush pending (unlikely).
     */
    int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
    AssertRC(rc);

# ifdef VBOX_STRICT
    if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
    {
        uint32_t cb = pRange->cb;
        GCPhys = pRange->GCPhys;
        while (cb)
        {
            uint64_t fFlags;
            RTHCPHYS HCPhys;
            rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
            Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
            cb     -= PAGE_SIZE;
            GCPhys += PAGE_SIZE;
        }
    }
# endif
    return rc;
}

#endif /* !IN_RC */