VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@45276

Last change on this file since 45276 was 44715, checked in by vboxsync on 2013-02-15.

iomMMIODoRead: Don't let 64-bit read thru unless the read mode is IOMMIO_FLAGS_READ_DWORD_QWORD.

/* $Id: IOMAllMMIO.cpp 44715 2013-02-15 14:23:06Z vboxsync $ */
/** @file
 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM
#include <VBox/vmm/iom.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
# include <VBox/vmm/iem.h>
#endif
#include "IOMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/hm.h>
#include "IOMInline.h"

#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/**
 * Array for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0U,   /* 0 - invalid */
    0,     /* *1 == 2^0 */
    1,     /* *2 == 2^1 */
    ~0U,   /* 3 - invalid */
    2,     /* *4 == 2^2 */
    ~0U,   /* 5 - invalid */
    ~0U,   /* 6 - invalid */
    ~0U,   /* 7 - invalid */
    3      /* *8 == 2^3 */
};

/**
 * Macro for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])

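/*
 * A minimal usage sketch (illustration only, helper name hypothetical):
 * SIZE_2_SHIFT lets callers turn "cTransfers * cb" into a shift, e.g.
 * 8 transfers of 4 bytes => 8 << SIZE_2_SHIFT(4) == 8 << 2 == 32 bytes.
 */
#if 0 /* illustration, not built */
DECLINLINE(RTGCUINTREG) iomExampleTransferBytes(RTGCUINTREG cTransfers, unsigned cb)
{
    Assert(cb == 1 || cb == 2 || cb == 4 || cb == 8); /* other sizes map to ~0U */
    return cTransfers << SIZE_2_SHIFT(cb);
}
#endif
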
/**
 * Deals with complicated MMIO writes.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code.  Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_READ may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to write to.
 * @param   GCPhys      The physical address to start writing.
 * @param   pvValue     The value to write.
 * @param   cbValue     The size of the value to write.
 */
static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
                 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart  = GCPhys; NOREF(GCPhysStart);
    bool const     fReadMissing =    (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
                                  || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
#endif

    /*
     * Check if we should ignore the write.
     */
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
    {
        Assert(cbValue != 4 || (GCPhys & 3));
        return VINF_SUCCESS;
    }
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
    {
        Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
        return VINF_SUCCESS;
    }

    /*
     * Split and conquer.
     */
    for (;;)
    {
        unsigned const  offAccess  = GCPhys & 3;
        unsigned        cbThisPart = 4 - offAccess;
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        /*
         * Get the missing bits (if any).
         */
        uint32_t u32MissingValue = 0;
        if (fReadMissing && cbThisPart != 4)
        {
            int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                        GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
            switch (rc2)
            {
                case VINF_SUCCESS:
                    break;
                case VINF_IOM_MMIO_UNUSED_FF:
                    u32MissingValue = UINT32_C(0xffffffff);
                    break;
                case VINF_IOM_MMIO_UNUSED_00:
                    u32MissingValue = 0;
                    break;
                case VINF_IOM_R3_MMIO_READ:
                case VINF_IOM_R3_MMIO_READ_WRITE:
                case VINF_IOM_R3_MMIO_WRITE:
                    /** @todo What if we've split a transfer and already read
                     *        something?  Since writes generally have side effects we
                     *        could be kind of screwed here...
                     *
                     *        Fix: Save the current state and resume it in ring-3.  Requires EM
                     *        to not go to REM for MMIO accesses (like it currently may). */
                    LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                default:
                    if (RT_FAILURE(rc2))
                    {
                        Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                        return rc2;
                    }
                    AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    break;
            }
        }

        /*
         * Merge missing and given bits.
         */
        uint32_t u32GivenMask;
        uint32_t u32GivenValue;
        switch (cbThisPart)
        {
            case 1:
                u32GivenValue = *(uint8_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x000000ff);
                break;
            case 2:
                u32GivenValue = *(uint16_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x0000ffff);
                break;
            case 3:
                u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
                                                    ((uint8_t const *)pvValue)[2], 0);
                u32GivenMask  = UINT32_C(0x00ffffff);
                break;
            case 4:
                u32GivenValue = *(uint32_t const *)pvValue;
                u32GivenMask  = UINT32_C(0xffffffff);
                break;
            default:
                AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
        }
        if (offAccess)
        {
            u32GivenValue <<= offAccess * 8;
            u32GivenMask  <<= offAccess * 8;
        }

        uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
                          | (u32GivenValue   &  u32GivenMask);

        /*
         * Do DWORD write to the device.
         */
        int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                     GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something?  Since reads can have side effects we could be
                 *        kind of screwed here...
                 *
                 *        Fix: Save the current state and resume it in ring-3.  Requires EM
                 *        to not go to REM for MMIO accesses (like it currently may). */
                LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t const *)pvValue + cbThisPart;
    }

    return rc;
}
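

/*
 * A minimal sketch (illustration only, helper name hypothetical) of the
 * merge performed above: the guest-supplied bytes are placed under
 * u32GivenMask, everything else comes from the dword optionally read
 * back from the device.  E.g. a 1-byte write of 0xAB at offset 2 into a
 * dword reading 0x11223344 yields 0x11AB3344.
 */
#if 0 /* illustration, not built */
static uint32_t iomExampleMergeDword(uint32_t u32MissingValue, uint32_t u32GivenValue,
                                     uint32_t u32GivenMask, unsigned offAccess)
{
    u32GivenValue <<= offAccess * 8;    /* move the given bytes into position */
    u32GivenMask  <<= offAccess * 8;
    return (u32MissingValue & ~u32GivenMask)
         | (u32GivenValue   &  u32GivenMask);
}
#endif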




/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
 */
static int iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
    Assert(pStats);
#endif

    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
    VBOXSTRICTRC rc;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
    {
        if (   (cb == 4 && !(GCPhysFault & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
            rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
        else
            rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
    }
    else
        rc = VINF_SUCCESS;
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return VBOXSTRICTRC_TODO(rc);
}


/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code.  Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to read from.
 * @param   GCPhys      The physical address to start reading.
 * @param   pvValue     Where to store the value.
 * @param   cbValue     The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something?  Since reads can have side effects we could be
                 *        kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        u32Value >>= (GCPhys & 3) * 8;

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}
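

/*
 * A minimal sketch (illustration only, helper name hypothetical) of the
 * extraction done above: a sub-dword read is served from the aligned
 * dword by shifting the addressed bytes down to bit 0; the size-switched
 * store then keeps only cbThisPart bytes.  E.g. reading 2 bytes at
 * offset 1 of a dword valued 0x11223344 yields 0x2233.
 */
#if 0 /* illustration, not built */
static uint32_t iomExampleExtractFromDword(uint32_t u32Value, RTGCPHYS GCPhys, unsigned cbThisPart)
{
    u32Value >>= (GCPhys & 3) * 8;                          /* align at bit 0 */
    if (cbThisPart < 4)
        u32Value &= (UINT32_C(1) << (cbThisPart * 8)) - 1;  /* drop the rest */
    return u32Value;
}
#endif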


/**
 * Implements VINF_IOM_MMIO_UNUSED_FF.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the 0xff bytes.
 * @param   cbValue     How many bytes to read.
 */
static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t *)pvValue  = UINT8_C(0xff); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0xff);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_00.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the zeros.
 * @param   cbValue     How many bytes to read.
 */
static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t *)pvValue  = UINT8_C(0x00); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0x00);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
    Assert(pStats);
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#endif

    VBOXSTRICTRC rc;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (   cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (   cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
        else
            rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rc = VINF_IOM_MMIO_UNUSED_FF;
    if (rc != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rc))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rc = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rc = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }
    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return VBOXSTRICTRC_VAL(rc);
}


/**
 * Internal - statistics only.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    switch (cb)
    {
        case 1:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
            break;
        case 2:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
            break;
        case 4:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
            break;
        case 8:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
            break;
        default:
            /* No way. */
            AssertMsgFailed(("Invalid data length %d\n", cb));
            break;
    }
#else
    NOREF(pVM); NOREF(cb);
#endif
}


/**
 * MOV      reg, mem         (read)
 * MOVZX    reg, mem         (read)
 * MOVSX    reg, mem         (read)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the data size from parameter 2,
     * and call the handler function to get the data.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));

    uint64_t u64Data = 0;
    int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Do sign extension for MOVSX.
         */
        /** @todo checkup MOVSX implementation! */
        if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
        {
            if (cb == 1)
            {
                /* DWORD <- BYTE */
                int64_t iData = (int8_t)u64Data;
                u64Data = (uint64_t)iData;
            }
            else
            {
                /* DWORD <- WORD */
                int64_t iData = (int16_t)u64Data;
                u64Data = (uint64_t)iData;
            }
        }

        /*
         * Store the result to register (parameter 1).
         */
        bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
        AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
    }

    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * MOV      mem, reg|imm     (write)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);

    /*
     * Get data to write from second parameter,
     * and call the callback to write it.
     */
    unsigned cb = 0;
    uint64_t u64Data = 0;
    bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
    AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);

    int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/** Wrapper for reading virtual memory. */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler.  That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    NOREF(pVCpu);
    int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
    /* Page may be protected and not directly accessible. */
    if (rc == VERR_ACCESS_DENIED)
        rc = VINF_IOM_R3_IOPORT_WRITE;
    return rc;
#else
    return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
#endif
}


/** Wrapper for writing virtual memory. */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code.  Some thought needs to be spent on theoretical concurrency issues as
     *        well, since we're not behind the pgm lock and the handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    NOREF(pCtxCore);
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
#endif
}


#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working; it also has buggy ECX handling. */
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM             The virtual machine.
 * @param   fWriteAccess    Whether the faulting access was a write.
 * @param   pRegFrame       Trap register frame.
 * @param   GCPhysFault     The GC physical address corresponding to pvFault.
 * @param   pCpu            Disassembler CPU state.
 * @param   pRange          Pointer to the MMIO range.
 * @param   ppStat          Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
                            PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_R3_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->fPrefix & DISPREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_R3_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_R3_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
            {
                iomMmioReleaseRange(pVM, pRange);
                return VINF_IOM_R3_MMIO_READ_WRITE;
            }

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
            iomMmioReleaseRange(pVM, pRange);
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */


/**
 * Gets the address / opcode mask corresponding to the given CPU mode.
 *
 * @returns Mask.
 * @param   enmCpuMode      CPU mode.
 */
static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
{
    switch (enmCpuMode)
    {
        case DISCPUMODE_16BIT: return UINT16_MAX;
        case DISCPUMODE_32BIT: return UINT32_MAX;
        case DISCPUMODE_64BIT: return UINT64_MAX;
        default:
            AssertFailedReturn(UINT32_MAX);
    }
}
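

/*
 * A minimal sketch (illustration only, helper name hypothetical) of how
 * the mask above is used by the string-instruction interpreters below:
 * RSI/RDI/RCX updates must wrap at the current address size while
 * preserving the untouched high bits.  E.g. with a 16-bit address size
 * (fAddrMask == UINT16_MAX), advancing 0x12340000FFFFFFFF by +4 gives
 * 0x12340000FFFF0003.
 */
#if 0 /* illustration, not built */
static uint64_t iomExampleAdvanceReg(uint64_t uReg, int offIncrement, uint64_t fAddrMask)
{
    return ((uReg + (uint64_t)(int64_t)offIncrement) & fAddrMask)   /* wrapped low part */
         | (uReg & ~fAddrMask);                                     /* preserved high part */
}
#endif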


/**
 * [REP] STOSB
 * [REP] STOSW
 * [REP] STOSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get bytes/words/dwords/qwords count to copy.
     */
    uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(VMMGetCpu(pVM))
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->rcx & fAddrMask;
        if (!cTransfers)
            return VINF_SUCCESS;
    }

/** @todo r=bird: bounds checks! */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif


    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (   pRange->CTX_SUFF(pfnFillCallback)
        && cb <= 4 /* can only fill 32-bit values */)
    {
        /*
         * Use the fill callback.
         */
        /** @todo pfnFillCallback must return number of bytes successfully written!!! */
        if (offIncrement > 0)
        {
            /* addr++ variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
        else
        {
            /* addr-- variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                   Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
    }
    else
    {
        /*
         * Use the write callback.
         */
        Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
        uint64_t u64Data = pRegFrame->rax;

        /* fill loop. */
        do
        {
            rc = iomMMIODoWrite(pVM, pRange, Phys, &u64Data, cb);
            if (rc != VINF_SUCCESS)
                break;

            Phys += offIncrement;
            pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
                           | (pRegFrame->rdi & ~fAddrMask);
            cTransfers--;
        } while (cTransfers);

        /* Update rcx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->rcx = (cTransfers & fAddrMask)
                           | (pRegFrame->rcx & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * [REP] LODSB
 * [REP] LODSW
 * [REP] LODSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * We do not support segment prefixes or REP*.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

    /*
     * Perform read.
     */
    int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
    if (rc == VINF_SUCCESS)
    {
        uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
        pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
                       | (pRegFrame->rsi & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * CMP [MMIO], reg|imm
 * CMP reg|imm, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the operands.
     */
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
        /* cmp reg, [MMIO]. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
        /* cmp [MMIO], reg|imm. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
    else
    {
        AssertMsgFailed(("Disassembler CMP problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* Emulate CMP and update guest flags. */
        uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}


/**
 * AND [MMIO], reg|imm
 * AND reg, [MMIO]
 * OR [MMIO], reg|imm
 * OR reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   pfnEmulate  Instruction emulation function.
 */
static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
{
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    bool fAndWrite;
    int rc;

#ifdef LOG_ENABLED
    const char *pszInstr;

    if (pCpu->pCurInstr->uOpcode == OP_XOR)
        pszInstr = "Xor";
    else if (pCpu->pCurInstr->uOpcode == OP_OR)
        pszInstr = "Or";
    else if (pCpu->pCurInstr->uOpcode == OP_AND)
        pszInstr = "And";
    else
        pszInstr = "OrXorAnd??";
#endif

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and reg, [MMIO]. */
        Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
        fAndWrite = false;
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and [MMIO], reg|imm. */
        fAndWrite = true;
        if (    (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
            &&  (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
            rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        AssertMsgFailed(("Disassembler AND problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
        /* Emulate AND and update guest flags. */
        uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);

        LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));

        if (fAndWrite)
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
        else
        {
            /* Store result to register. */
            bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
            AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
        }
        if (rc == VINF_SUCCESS)
        {
            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
            iomMMIOStatLength(pVM, cb);
        }
    }

    return rc;
}


/**
 * TEST [MMIO], reg|imm
 * TEST reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* test reg, [MMIO]. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* test [MMIO], reg|imm. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
    }
    else
    {
        AssertMsgFailed(("Disassembler TEST problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif

        /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
        uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}


/**
 * BT [MMIO], reg|imm
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    uint64_t uBit  = 0;
    uint64_t uData = 0;
    unsigned cbIgnored;

    if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
    {
        AssertMsgFailed(("Disassembler BT problem..\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    /* The size of the memory operand only matters here. */
    unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);

    /* bt [MMIO], reg|imm. */
    int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData);
    if (rc == VINF_SUCCESS)
    {
        /* Find the bit inside the faulting address */
        pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
        iomMMIOStatLength(pVM, cbData);
    }

    return rc;
}

/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_R3_MMIO_READ_WRITE;

    int rc;
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);

            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg. */
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassembler XCHG problem..\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}


/**
 * \#PF Handler callback for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.  This is UINT32_MAX when we don't have
 *                      any error code (the EPT misconfig hack).
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
{
    /* Take the IOM lock before performing any MMIO. */
    int rc = IOM_LOCK(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
         GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Locate the statistics; if the range is > PAGE_SIZE we'll use the first byte for everything.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
    if (!pStats)
    {
# ifdef IN_RING3
        IOM_UNLOCK(pVM);
        return VERR_NO_MEMORY;
# else
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        IOM_UNLOCK(pVM);
        return VINF_IOM_R3_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case, so
     * do the simple test first and then try to deal with uErrorCode being N/A.
     */
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        IOM_UNLOCK(pVM);
        return VINF_IOM_R3_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */
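
#if 0 /* illustration, not built: a restatement (hypothetical helper) of the
         deferral predicate above.  Defer to ring-3 when a needed callback
         only exists there; with uErrorCode unavailable (UINT32_MAX, the EPT
         misconfig case) the direction is unknown, so any ring-3-only
         callback forces the round trip. */
static bool iomExampleMustDeferToRing3(PIOMMMIORANGE pRange, uint32_t uErrorCode)
{
    if (   pRange->CTX_SUFF(pfnWriteCallback)
        && pRange->CTX_SUFF(pfnReadCallback))
        return false;   /* both directions handled in this context */
    if (uErrorCode == UINT32_MAX)
        return pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3;
    if (uErrorCode & X86_TRAP_PF_RW)
        return !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3;
    return !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3;
}
#endif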

    /*
     * Retain the range and do locking.
     */
    iomMmioRetainRange(pRange);
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    IOM_UNLOCK(pVM);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

    /*
     * Disassemble the instruction and interpret it.
     */
    PVMCPU        pVCpu = VMMGetCpu(pVM);
    PDISCPUSTATE  pDis  = &pVCpu->iom.s.DisState;
    unsigned      cbOp;
    rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
    if (RT_FAILURE(rc))
    {
        iomMmioReleaseRange(pVM, pRange);
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
        return rc;
    }
    switch (pDis->pCurInstr->uOpcode)
    {
        case OP_MOV:
        case OP_MOVZX:
        case OP_MOVSX:
        {
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
            AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
            if (uErrorCode != UINT32_MAX    /* EPT+MMIO optimization */
                ? uErrorCode & X86_TRAP_PF_RW
                : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
                rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
            else
                rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
            break;
        }


#ifdef IOM_WITH_MOVS_SUPPORT
        case OP_MOVSB:
        case OP_MOVSWD:
        {
            if (uErrorCode == UINT32_MAX)
                rc = VINF_IOM_R3_MMIO_READ_WRITE;
            else
            {
                STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
                PSTAMPROFILE pStat = NULL;
                rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
                STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
            }
            break;
        }
#endif

        case OP_STOSB:
        case OP_STOSWD:
            Assert(uErrorCode & X86_TRAP_PF_RW);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
            rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
            break;

        case OP_LODSB:
        case OP_LODSWD:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
            rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
            break;

        case OP_CMP:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
            rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
            break;

        case OP_AND:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
            break;

        case OP_OR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
            break;

        case OP_XOR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
            rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
            break;

        case OP_TEST:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
            rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
            break;

        case OP_BT:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
            rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
            break;

        case OP_XCHG:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
            rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
            break;


        /*
         * The instruction isn't supported.  Hand it on to ring-3.
         */
        default:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
            break;
    }

    /*
     * On success advance EIP.
     */
    if (rc == VINF_SUCCESS)
        pCtxCore->rip += cbOp;
    else
    {
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
        switch (rc)
        {
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
                break;
            case VINF_IOM_R3_MMIO_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
                break;
        }
#endif
    }

    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    iomMmioReleaseRange(pVM, pRange);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    return rc;
}

/**
 * \#PF Handler callback for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pCtxCore    Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
             GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
    return VBOXSTRICTRC_VAL(rcStrict);
}

/**
 * Physical access handler for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
{
    int rc2 = IOM_LOCK(pVM); NOREF(rc2);
#ifndef IN_RING3
    if (rc2 == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMmioGetRange(pVM, GCPhysFault));
    IOM_UNLOCK(pVM);
    return VBOXSTRICTRC_VAL(rcStrict);
}


#ifdef IN_RING3
/**
 * Access handler callback for MMIO ranges.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             Pointer to the VM.
 * @param   GCPhysFault     The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          Pointer to the MMIO range entry.
 */
DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
                                   PGMACCESSTYPE enmAccessType, void *pvUser)
{
    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);

    AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
    AssertPtr(pRange);
    NOREF(pvPhys);

    /*
     * Validate the range.
     */
    int rc = IOM_LOCK(pVM);
    AssertRC(rc);
    Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));

    /*
     * Perform locking.
     */
    iomMmioRetainRange(pRange);
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    IOM_UNLOCK(pVM);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

    /*
     * Perform the access.
     */
    if (enmAccessType == PGMACCESSTYPE_READ)
        rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
    else
        rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);

    AssertRC(rc);
    iomMmioReleaseRange(pVM, pRange);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    return rc;
}
#endif /* IN_RING3 */
1796
1797
1798/**
1799 * Reads a MMIO register.
1800 *
1801 * @returns VBox status code.
1802 *
1803 * @param pVM Pointer to the VM.
1804 * @param GCPhys The physical address to read.
1805 * @param pu32Value Where to store the value read.
1806 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
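 *
 * @par Example
 *      A minimal, hypothetical caller sketch (GCPhysRegs is an assumed device
 *      register base, not defined in this file); the ring-3 deferral statuses
 *      must be propagated, not treated as errors:
 * @code
 *      uint32_t u32Status = 0;
 *      VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, GCPhysRegs + 0x04, &u32Status, sizeof(u32Status));
 *      if (rcStrict == VINF_IOM_R3_MMIO_READ || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
 *          return rcStrict; // defer and redo the access in ring-3
 *      AssertRC(VBOXSTRICTRC_VAL(rcStrict));
 * @endcode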
1807 */
1808VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1809{
1810 /* Take the IOM lock before performing any MMIO. */
1811 VBOXSTRICTRC rc = IOM_LOCK(pVM);
1812#ifndef IN_RING3
1813 if (rc == VERR_SEM_BUSY)
 1814 return VINF_IOM_R3_MMIO_READ;
1815#endif
1816 AssertRC(VBOXSTRICTRC_VAL(rc));
1817#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1818 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
1819#endif
1820
1821 /*
1822 * Lookup the current context range node and statistics.
1823 */
1824 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1825 if (!pRange)
1826 {
1827 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1828 IOM_UNLOCK(pVM);
1829 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1830 }
1831#ifdef VBOX_WITH_STATISTICS
1832 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1833 if (!pStats)
1834 {
1835 IOM_UNLOCK(pVM);
1836# ifdef IN_RING3
1837 return VERR_NO_MEMORY;
1838# else
1839 return VINF_IOM_R3_MMIO_READ;
1840# endif
1841 }
1842 STAM_COUNTER_INC(&pStats->Accesses);
1843#endif /* VBOX_WITH_STATISTICS */
1844
1845 if (pRange->CTX_SUFF(pfnReadCallback))
1846 {
1847 /*
1848 * Perform locking.
1849 */
1850 iomMmioRetainRange(pRange);
1851 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1852 IOM_UNLOCK(pVM);
 1853 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
1854 if (rc != VINF_SUCCESS)
1855 {
1856 iomMmioReleaseRange(pVM, pRange);
1857 return rc;
1858 }
1859
1860 /*
1861 * Perform the read and deal with the result.
1862 */
1863 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1864 if ( (cbValue == 4 && !(GCPhys & 3))
1865 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
1866 || (cbValue == 8 && !(GCPhys & 7)) )
1867 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
1868 pu32Value, (unsigned)cbValue);
1869 else
1870 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
1871 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1872 switch (VBOXSTRICTRC_VAL(rc))
1873 {
1874 case VINF_SUCCESS:
1875 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1876 iomMmioReleaseRange(pVM, pRange);
1877 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1878 return rc;
1879#ifndef IN_RING3
1880 case VINF_IOM_R3_MMIO_READ:
1881 case VINF_IOM_R3_MMIO_READ_WRITE:
1882 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1883#endif
1884 default:
1885 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1886 iomMmioReleaseRange(pVM, pRange);
1887 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1888 return rc;
1889
1890 case VINF_IOM_MMIO_UNUSED_00:
1891 iomMMIODoRead00s(pu32Value, cbValue);
1892 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1893 iomMmioReleaseRange(pVM, pRange);
1894 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1895 return VINF_SUCCESS;
1896
1897 case VINF_IOM_MMIO_UNUSED_FF:
1898 iomMMIODoReadFFs(pu32Value, cbValue);
1899 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1900 iomMmioReleaseRange(pVM, pRange);
1901 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1902 return VINF_SUCCESS;
1903 }
1904 /* not reached */
1905 }
1906#ifndef IN_RING3
1907 if (pRange->pfnReadCallbackR3)
1908 {
1909 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1910 IOM_UNLOCK(pVM);
1911 return VINF_IOM_R3_MMIO_READ;
1912 }
1913#endif
1914
1915 /*
 1916 * Unassigned memory - this is actually not supposed to happen...
1917 */
1918 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1919 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1920 iomMMIODoReadFFs(pu32Value, cbValue);
1921 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1922 IOM_UNLOCK(pVM);
1923 return VINF_SUCCESS;
1924}
1925
1926
1927/**
1928 * Writes to a MMIO register.
1929 *
1930 * @returns VBox status code.
1931 *
1932 * @param pVM Pointer to the VM.
1933 * @param GCPhys The physical address to write to.
1934 * @param u32Value The value to write.
 1935 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
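 *
 * @par Example
 *      A minimal, hypothetical caller sketch (GCPhysRegs is an assumed device
 *      register base, not defined in this file):
 * @code
 *      VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, GCPhysRegs + 0x08, UINT32_C(1), sizeof(uint32_t));
 *      if (rcStrict == VINF_IOM_R3_MMIO_WRITE || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
 *          return rcStrict; // defer and redo the access in ring-3
 *      AssertRC(VBOXSTRICTRC_VAL(rcStrict));
 * @endcode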
1936 */
1937VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1938{
1939 /* Take the IOM lock before performing any MMIO. */
1940 VBOXSTRICTRC rc = IOM_LOCK(pVM);
1941#ifndef IN_RING3
1942 if (rc == VERR_SEM_BUSY)
1943 return VINF_IOM_R3_MMIO_WRITE;
1944#endif
1945 AssertRC(VBOXSTRICTRC_VAL(rc));
1946#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1947 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
1948#endif
1949
1950 /*
1951 * Lookup the current context range node.
1952 */
1953 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1954 if (!pRange)
1955 {
1956 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1957 IOM_UNLOCK(pVM);
1958 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1959 }
1960#ifdef VBOX_WITH_STATISTICS
1961 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1962 if (!pStats)
1963 {
1964 IOM_UNLOCK(pVM);
1965# ifdef IN_RING3
1966 return VERR_NO_MEMORY;
1967# else
1968 return VINF_IOM_R3_MMIO_WRITE;
1969# endif
1970 }
1971 STAM_COUNTER_INC(&pStats->Accesses);
1972#endif /* VBOX_WITH_STATISTICS */
1973
1974 if (pRange->CTX_SUFF(pfnWriteCallback))
1975 {
1976 /*
1977 * Perform locking.
1978 */
1979 iomMmioRetainRange(pRange);
1980 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1981 IOM_UNLOCK(pVM);
 1982 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
1983 if (rc != VINF_SUCCESS)
1984 {
1985 iomMmioReleaseRange(pVM, pRange);
1986 return rc;
1987 }
1988
1989 /*
1990 * Perform the write.
1991 */
1992 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1993 if ( (cbValue == 4 && !(GCPhys & 3))
1994 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
1995 || (cbValue == 8 && !(GCPhys & 7)) )
1996 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1997 GCPhys, &u32Value, (unsigned)cbValue);
1998 else
1999 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2000 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2001#ifndef IN_RING3
2002 if ( rc == VINF_IOM_R3_MMIO_WRITE
2003 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2004 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2005#endif
2006 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2007 iomMmioReleaseRange(pVM, pRange);
2008 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2009 return rc;
2010 }
2011#ifndef IN_RING3
2012 if (pRange->pfnWriteCallbackR3)
2013 {
2014 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2015 IOM_UNLOCK(pVM);
2016 return VINF_IOM_R3_MMIO_WRITE;
2017 }
2018#endif
2019
2020 /*
2021 * No write handler, nothing to do.
2022 */
2023 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2024 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2025 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2026 IOM_UNLOCK(pVM);
2027 return VINF_SUCCESS;
2028}
2029
2030
2031/**
2032 * [REP*] INSB/INSW/INSD
2033 * ES:EDI,DX[,ECX]
2034 *
2035 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2036 *
2037 * @returns Strict VBox status code. Informational status codes other than the one documented
2038 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2039 * @retval VINF_SUCCESS Success.
2040 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2041 * status code must be passed on to EM.
2042 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2043 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2044 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2045 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2046 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2047 *
2048 * @param pVM The virtual machine.
2049 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2050 * @param uPort IO Port
2051 * @param uPrefix IO instruction prefix
2052 * @param enmAddrMode The address mode.
 2053 * @param cbTransfer Size of the transfer unit in bytes.
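 *
 * @remarks Worked example of the address-size masking performed below
 *          (illustrative values): a 16-bit address size gives
 *          fAddrMask = 0xffff, so for rdi = 0x1234fffe and cbTransfer = 4 the
 *          update ((rdi + 4) & 0xffff) | (rdi & ~0xffff) yields 0x12340002,
 *          i.e. DI wraps around while the upper register bits are preserved.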
2054 */
2055VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2056 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2057{
2058 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2059
2060 /*
 2061 * We do not support REPNE or a decrementing destination
 2062 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
2063 */
2064 if ( (uPrefix & DISPREFIX_REPNE)
2065 || pRegFrame->eflags.Bits.u1DF)
2066 return VINF_EM_RAW_EMULATE_INSTR;
2067
2068 PVMCPU pVCpu = VMMGetCpu(pVM);
2069
2070 /*
2071 * Get bytes/words/dwords count to transfer.
2072 */
2073 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2074 RTGCUINTREG cTransfers = 1;
2075 if (uPrefix & DISPREFIX_REP)
2076 {
2077#ifndef IN_RC
2078 if ( CPUMIsGuestIn64BitCode(pVCpu)
2079 && pRegFrame->rcx >= _4G)
2080 return VINF_EM_RAW_EMULATE_INSTR;
2081#endif
2082 cTransfers = pRegFrame->rcx & fAddrMask;
2083 if (!cTransfers)
2084 return VINF_SUCCESS;
2085 }
2086
2087 /* Convert destination address es:edi. */
2088 RTGCPTR GCPtrDst;
2089 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2090 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2091 &GCPtrDst);
2092 if (RT_FAILURE(rc2))
2093 {
2094 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2095 return VINF_EM_RAW_EMULATE_INSTR;
2096 }
2097
2098 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2099 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2100 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2101 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2102 if (rc2 != VINF_SUCCESS)
2103 {
2104 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2105 return VINF_EM_RAW_EMULATE_INSTR;
2106 }
2107
2108 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2109 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2110 if (cTransfers > 1)
2111 {
2112 /* If the device supports string transfers, ask it to do as
2113 * much as it wants. The rest is done with single-word transfers. */
2114 const RTGCUINTREG cTransfersOrg = cTransfers;
2115 rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
2116 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2117 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2118 | (pRegFrame->rdi & ~fAddrMask);
2119 }
2120
2121#ifdef IN_RC
2122 MMGCRamRegisterTrapHandler(pVM);
2123#endif
2124 while (cTransfers && rcStrict == VINF_SUCCESS)
2125 {
2126 uint32_t u32Value;
2127 rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
2128 if (!IOM_SUCCESS(rcStrict))
2129 break;
2130 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2131 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2132 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2133 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2134 | (pRegFrame->rdi & ~fAddrMask);
2135 cTransfers--;
2136 }
2137#ifdef IN_RC
2138 MMGCRamDeregisterTrapHandler(pVM);
2139#endif
2140
2141 /* Update rcx on exit. */
2142 if (uPrefix & DISPREFIX_REP)
2143 pRegFrame->rcx = (cTransfers & fAddrMask)
2144 | (pRegFrame->rcx & ~fAddrMask);
2145
2146 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2147 return rcStrict;
2148}
2149
2150
2151/**
2152 * [REP*] INSB/INSW/INSD
2153 * ES:EDI,DX[,ECX]
2154 *
2155 * @returns Strict VBox status code. Informational status codes other than the one documented
2156 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2157 * @retval VINF_SUCCESS Success.
2158 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2159 * status code must be passed on to EM.
2160 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2161 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2162 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2163 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2164 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2165 *
2166 * @param pVM The virtual machine.
2167 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2168 * @param pCpu Disassembler CPU state.
2169 */
2170VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2171{
2172 /*
2173 * Get port number directly from the register (no need to bother the
2174 * disassembler). And get the I/O register size from the opcode / prefix.
2175 */
2176 RTIOPORT Port = pRegFrame->edx & 0xffff;
2177 unsigned cb = 0;
2178 if (pCpu->pCurInstr->uOpcode == OP_INSB)
2179 cb = 1;
2180 else
 2181 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bit modes */
2182
2183 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2184 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2185 {
 2186 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2187 return rcStrict;
2188 }
2189
2190 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2191}
2192
2193
2194/**
2195 * [REP*] OUTSB/OUTSW/OUTSD
2196 * DS:ESI,DX[,ECX]
2197 *
2198 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2199 *
2200 * @returns Strict VBox status code. Informational status codes other than the one documented
2201 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2202 * @retval VINF_SUCCESS Success.
2203 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2204 * status code must be passed on to EM.
2205 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2206 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2207 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2208 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2209 *
2210 * @param pVM The virtual machine.
2211 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2212 * @param uPort IO Port
2213 * @param uPrefix IO instruction prefix
2214 * @param enmAddrMode The address mode.
 2215 * @param cbTransfer Size of the transfer unit in bytes.
2216 */
2217VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2218 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2219{
2220 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2221
2222 /*
 2223 * We do not support segment prefixes, REPNE or
 2224 * a decrementing source pointer.
2225 */
2226 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2227 || pRegFrame->eflags.Bits.u1DF)
2228 return VINF_EM_RAW_EMULATE_INSTR;
2229
2230 PVMCPU pVCpu = VMMGetCpu(pVM);
2231
2232 /*
2233 * Get bytes/words/dwords count to transfer.
2234 */
2235 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2236 RTGCUINTREG cTransfers = 1;
2237 if (uPrefix & DISPREFIX_REP)
2238 {
2239#ifndef IN_RC
2240 if ( CPUMIsGuestIn64BitCode(pVCpu)
2241 && pRegFrame->rcx >= _4G)
2242 return VINF_EM_RAW_EMULATE_INSTR;
2243#endif
2244 cTransfers = pRegFrame->rcx & fAddrMask;
2245 if (!cTransfers)
2246 return VINF_SUCCESS;
2247 }
2248
2249 /* Convert source address ds:esi. */
2250 RTGCPTR GCPtrSrc;
2251 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2252 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2253 &GCPtrSrc);
2254 if (RT_FAILURE(rc2))
2255 {
2256 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2257 return VINF_EM_RAW_EMULATE_INSTR;
2258 }
2259
2260 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2261 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2262 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2263 (cpl == 3) ? X86_PTE_US : 0);
2264 if (rc2 != VINF_SUCCESS)
2265 {
2266 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2267 return VINF_EM_RAW_EMULATE_INSTR;
2268 }
2269
2270 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2271 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2272 if (cTransfers > 1)
2273 {
2274 /*
2275 * If the device supports string transfers, ask it to do as
2276 * much as it wants. The rest is done with single-word transfers.
2277 */
2278 const RTGCUINTREG cTransfersOrg = cTransfers;
2279 rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
2280 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2281 pRegFrame->rsi = ((pRegFrame->rsi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2282 | (pRegFrame->rsi & ~fAddrMask);
2283 }
2284
2285#ifdef IN_RC
2286 MMGCRamRegisterTrapHandler(pVM);
2287#endif
2288
2289 while (cTransfers && rcStrict == VINF_SUCCESS)
2290 {
2291 uint32_t u32Value = 0;
2292 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2293 if (rcStrict != VINF_SUCCESS)
2294 break;
2295 rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
2296 if (!IOM_SUCCESS(rcStrict))
2297 break;
 2298 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbTransfer);
2299 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2300 | (pRegFrame->rsi & ~fAddrMask);
2301 cTransfers--;
2302 }
2303
2304#ifdef IN_RC
2305 MMGCRamDeregisterTrapHandler(pVM);
2306#endif
2307
2308 /* Update rcx on exit. */
2309 if (uPrefix & DISPREFIX_REP)
2310 pRegFrame->rcx = (cTransfers & fAddrMask)
2311 | (pRegFrame->rcx & ~fAddrMask);
2312
2313 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2314 return rcStrict;
2315}
2316
2317
2318/**
2319 * [REP*] OUTSB/OUTSW/OUTSD
2320 * DS:ESI,DX[,ECX]
2321 *
2322 * @returns Strict VBox status code. Informational status codes other than the one documented
2323 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2324 * @retval VINF_SUCCESS Success.
2325 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2326 * status code must be passed on to EM.
2327 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2328 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
2329 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2330 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2331 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2332 *
2333 * @param pVM The virtual machine.
2334 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2335 * @param pCpu Disassembler CPU state.
2336 */
2337VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2338{
2339 /*
2340 * Get port number from the first parameter.
2341 * And get the I/O register size from the opcode / prefix.
2342 */
2343 uint64_t Port = 0;
2344 unsigned cb = 0;
2345 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &Port, &cb);
2346 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
2347 if (pCpu->pCurInstr->uOpcode == OP_OUTSB)
2348 cb = 1;
2349 else
 2350 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bit modes */
2351
2352 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2353 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2354 {
 2355 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2356 return rcStrict;
2357 }
2358
2359 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2360}
2361
2362#ifndef IN_RC
2363
2364/**
2365 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2366 *
2367 * (This is a special optimization used by the VGA device.)
2368 *
2369 * @returns VBox status code. This API may return VINF_SUCCESS even if no
 2370 * remapping is made.
2371 *
2372 * @param pVM The virtual machine.
2373 * @param GCPhys The address of the MMIO page to be changed.
2374 * @param GCPhysRemapped The address of the MMIO2 page.
2375 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2376 * for the time being.
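 *
 * @par Example
 *      A hypothetical sketch of the VGA-style usage (GCPhysFault,
 *      GCPhysMMIO2Base and offPage are assumed names): on a fault in the
 *      framebuffer range, alias the MMIO page to its MMIO2 backing page so
 *      that subsequent accesses go straight to RAM instead of faulting:
 * @code
 *      rc = IOMMMIOMapMMIO2Page(pVM, GCPhysFault, GCPhysMMIO2Base + offPage,
 *                               X86_PTE_RW | X86_PTE_P);
 * @endcode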
2377 */
2378VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2379{
2380 /* Currently only called from the VGA device during MMIO. */
2381 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2382 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2383 PVMCPU pVCpu = VMMGetCpu(pVM);
2384
2385 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2386 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2387 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2388 && !HMIsNestedPagingActive(pVM)))
2389 return VINF_SUCCESS; /* ignore */
2390
2391 int rc = IOM_LOCK(pVM);
2392 if (RT_FAILURE(rc))
2393 return VINF_SUCCESS; /* better luck the next time around */
2394
2395 /*
2396 * Lookup the context range node the page belongs to.
2397 */
2398 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
2399 AssertMsgReturn(pRange,
2400 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2401
2402 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2403 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2404
2405 /*
2406 * Do the aliasing; page align the addresses since PGM is picky.
2407 */
2408 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2409 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2410
2411 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2412
2413 IOM_UNLOCK(pVM);
2414 AssertRCReturn(rc, rc);
2415
2416 /*
2417 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2418 * can simply prefetch it.
2419 *
2420 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2421 */
2422#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2423# ifdef VBOX_STRICT
2424 uint64_t fFlags;
2425 RTHCPHYS HCPhys;
2426 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2427 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2428# endif
2429#endif
2430 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2431 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2432 return VINF_SUCCESS;
2433}
2434
2435
2436/**
2437 * Mapping a HC page in place of an MMIO page for direct access.
2438 *
2439 * (This is a special optimization used by the APIC in the VT-x case.)
2440 *
2441 * @returns VBox status code.
2442 *
2443 * @param pVM Pointer to the VM.
2444 * @param pVCpu Pointer to the VMCPU.
2445 * @param GCPhys The address of the MMIO page to be changed.
2446 * @param HCPhys The address of the host physical page.
2447 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2448 * for the time being.
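 *
 * @par Example
 *      A hypothetical sketch of the VT-x APIC usage (GCPhysApicBase and
 *      HCPhysApicPage are assumed names): map a host page over the guest
 *      APIC MMIO page so subsequent accesses no longer fault:
 * @code
 *      rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, HCPhysApicPage,
 *                                X86_PTE_RW | X86_PTE_P);
 * @endcode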
2449 */
2450VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2451{
2452 /* Currently only called from VT-x code during a page fault. */
2453 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2454
2455 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2456 Assert(HMIsEnabled(pVM));
2457
2458 /*
2459 * Lookup the context range node the page belongs to.
2460 */
2461#ifdef VBOX_STRICT
2462 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2463 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2464 AssertMsgReturn(pRange,
2465 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2466 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2467 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2468#endif
2469
2470 /*
2471 * Do the aliasing; page align the addresses since PGM is picky.
2472 */
2473 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2474 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2475
2476 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2477 AssertRCReturn(rc, rc);
2478
2479 /*
2480 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2481 * can simply prefetch it.
2482 *
2483 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2484 */
2485 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2486 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2487 return VINF_SUCCESS;
2488}
2489
2490
2491/**
2492 * Reset a previously modified MMIO region; restore the access flags.
2493 *
2494 * @returns VBox status code.
2495 *
2496 * @param pVM The virtual machine.
2497 * @param GCPhys Physical address that's part of the MMIO region to be reset.
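 *
 * @par Example
 *      A hypothetical sketch (GCPhysRange stands for any address inside a
 *      region previously remapped with IOMMMIOMapMMIO2Page): restore the
 *      handlers once the aliasing is no longer wanted, so accesses trap
 *      again:
 * @code
 *      rc = IOMMMIOResetRegion(pVM, GCPhysRange);
 * @endcode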
2498 */
2499VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2500{
2501 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2502
2503 PVMCPU pVCpu = VMMGetCpu(pVM);
2504
2505 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2506 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2507 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2508 && !HMIsNestedPagingActive(pVM)))
2509 return VINF_SUCCESS; /* ignore */
2510
2511 /*
2512 * Lookup the context range node the page belongs to.
2513 */
2514#ifdef VBOX_STRICT
2515 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2516 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2517 AssertMsgReturn(pRange,
2518 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2519 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2520 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2521#endif
2522
2523 /*
 2524 * Call PGM to do the work.
2525 *
2526 * After the call, all the pages should be non-present... unless there is
2527 * a page pool flush pending (unlikely).
2528 */
2529 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2530 AssertRC(rc);
2531
2532#ifdef VBOX_STRICT
2533 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2534 {
2535 uint32_t cb = pRange->cb;
2536 GCPhys = pRange->GCPhys;
2537 while (cb)
2538 {
2539 uint64_t fFlags;
2540 RTHCPHYS HCPhys;
2541 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2542 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2543 cb -= PAGE_SIZE;
2544 GCPhys += PAGE_SIZE;
2545 }
2546 }
2547#endif
2548 return rc;
2549}
2550
2551#endif /* !IN_RC */
2552