VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@ 44573

Last change on this file since 44573 was 44573, checked in by vboxsync, 12 years ago

IOM: Fixed IOMMMIO_FLAGS_WRITE_ONLY_DWORD and added IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 89.0 KB
1/* $Id: IOMAllMMIO.cpp 44573 2013-02-06 15:24:36Z vboxsync $ */
2/** @file
3 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_IOM
23#include <VBox/vmm/iom.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/pgm.h>
30#include <VBox/vmm/trpm.h>
31#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
32# include <VBox/vmm/iem.h>
33#endif
34#include "IOMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/hm.h>
38#include "IOMInline.h"
39
40#include <VBox/dis.h>
41#include <VBox/disopcode.h>
42#include <VBox/vmm/pdmdev.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <iprt/assert.h>
46#include <VBox/log.h>
47#include <iprt/asm.h>
48#include <iprt/string.h>
49
50
51/*******************************************************************************
52* Global Variables *
53*******************************************************************************/
54
55/**
56 * Array for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
57 */
58static const unsigned g_aSize2Shift[] =
59{
60 ~0U, /* 0 - invalid */
61 0, /* *1 == 2^0 */
62 1, /* *2 == 2^1 */
63 ~0U, /* 3 - invalid */
64 2, /* *4 == 2^2 */
65 ~0U, /* 5 - invalid */
66 ~0U, /* 6 - invalid */
67 ~0U, /* 7 - invalid */
68 3 /* *8 == 2^3 */
69};
70
71/**
72 * Macro for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
73 */
74#define SIZE_2_SHIFT(cb) (g_aSize2Shift[cb])
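/* Illustrative example: SIZE_2_SHIFT(4) == 2, so the string instruction
   emulators below can turn an element count into a byte count with
   cTransfers << SIZE_2_SHIFT(cb) instead of a multiply. */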
75
76
77/**
78 * Deals with complicated MMIO writes.
79 *
80 * Complicated means unaligned or non-dword/qword sized accesses depending on
81 * the MMIO region's access mode flags.
82 *
83 * @returns Strict VBox status code. Any EM scheduling status code,
84 * VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
85 * VINF_IOM_R3_MMIO_READ may be returned.
86 *
87 * @param pVM Pointer to the VM.
88 * @param pRange The range to write to.
89 * @param GCPhys The physical address to start writing.
90 * @param pvValue The value to write.
91 * @param cbValue The size of the value to write.
92 */
93static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
94{
95 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
96 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
97 VERR_IOM_MMIO_IPE_1);
98 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
99 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
100 bool const fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
101 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;
102
103 /*
104 * Do debug stop if requested.
105 */
106 int rc = VINF_SUCCESS; NOREF(pVM);
107#ifdef VBOX_STRICT
108 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
109 {
110# ifdef IN_RING3
111 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
112 "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
113 if (rc == VERR_DBGF_NOT_ATTACHED)
114 rc = VINF_SUCCESS;
115# else
116 return VINF_IOM_R3_MMIO_WRITE;
117# endif
118 }
119#endif
120
121 /*
122 * Check if we should ignore the write.
123 */
124 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
125 {
126 Assert(cbValue != 4 || (GCPhys & 3));
127 return VINF_SUCCESS;
128 }
129 if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
130 {
131 Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
132 return VINF_SUCCESS;
133 }
134
135 /*
136 * Split and conquer.
137 */
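/* Illustrative example: a 6-byte write starting at GCPhys = X+2, where X is
   dword aligned, is carried out as two dword accesses:
     pass 1: offAccess=2, cbThisPart=2 -> read-merge-write of the dword at X
     pass 2: offAccess=0, cbThisPart=4 -> plain dword write at X+4
   (the read-merge step only happens when fReadMissing is set). */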
138 for (;;)
139 {
140 unsigned const offAccess = GCPhys & 3;
141 unsigned cbThisPart = 4 - offAccess;
142 if (cbThisPart > cbValue)
143 cbThisPart = cbValue;
144
145 /*
146 * Get the missing bits (if any).
147 */
148 uint32_t u32MissingValue = 0;
149 if (fReadMissing && cbThisPart != 4)
150 {
151 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
152 GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
153 switch (rc2)
154 {
155 case VINF_SUCCESS:
156 break;
157 case VINF_IOM_MMIO_UNUSED_FF:
158 u32MissingValue = UINT32_C(0xffffffff);
159 break;
160 case VINF_IOM_MMIO_UNUSED_00:
161 u32MissingValue = 0;
162 break;
163 case VINF_IOM_R3_MMIO_READ:
164 case VINF_IOM_R3_MMIO_READ_WRITE:
165 case VINF_IOM_R3_MMIO_WRITE:
166 /** @todo What if we've split a transfer and already read
167 * something? Since writes generally have side effects we
168 * could be kind of screwed here...
169 *
170 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
171 * to REM for MMIO accesses (like it currently may do). */
172
173 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
174 return rc2;
175 default:
176 if (RT_FAILURE(rc2))
177 {
178 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
179 return rc2;
180 }
181 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
182 if (rc == VINF_SUCCESS || rc2 < rc)
183 rc = rc2;
184 break;
185 }
186 }
187
188 /*
189 * Merge missing and given bits.
190 */
191 uint32_t u32GivenMask;
192 uint32_t u32GivenValue;
193 switch (cbThisPart)
194 {
195 case 1:
196 u32GivenValue = *(uint8_t const *)pvValue;
197 u32GivenMask = UINT32_C(0x000000ff);
198 break;
199 case 2:
200 u32GivenValue = *(uint16_t const *)pvValue;
201 u32GivenMask = UINT32_C(0x0000ffff);
202 break;
203 case 3:
204 u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
205 ((uint8_t const *)pvValue)[2], 0);
206 u32GivenMask = UINT32_C(0x00ffffff);
207 break;
208 case 4:
209 u32GivenValue = *(uint32_t const *)pvValue;
210 u32GivenMask = UINT32_C(0xffffffff);
211 break;
212 default:
213 AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
214 }
215 if (offAccess)
216 {
217 u32GivenValue <<= offAccess * 8;
218 u32GivenMask <<= offAccess * 8;
219 }
220
221 uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
222 | (u32GivenValue & u32GivenMask);
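/* Illustrative example: a one byte write of 0xAB at offset 1 into a dword
   currently reading 0x11223344 yields u32GivenMask = 0x0000ff00,
   u32GivenValue = 0x0000ab00 and thus u32Value = 0x1122ab44. */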
223
224 /*
225 * Do DWORD write to the device.
226 */
227 int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
228 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
229 switch (rc2)
230 {
231 case VINF_SUCCESS:
232 break;
233 case VINF_IOM_R3_MMIO_READ:
234 case VINF_IOM_R3_MMIO_READ_WRITE:
235 case VINF_IOM_R3_MMIO_WRITE:
236 /** @todo What if we've split a transfer and already written
237 * something? Since writes can have side effects we could be
238 * kind of screwed here...
239 *
240 * Fix: Save the current state and resume it in ring-3. Requires EM to not go
241 * to REM for MMIO accesses (like it currently may do). */
242 LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
243 return rc2;
244 default:
245 if (RT_FAILURE(rc2))
246 {
247 Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
248 return rc2;
249 }
250 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
251 if (rc == VINF_SUCCESS || rc2 < rc)
252 rc = rc2;
253 break;
254 }
255
256 /*
257 * Advance.
258 */
259 cbValue -= cbThisPart;
260 if (!cbValue)
261 break;
262 GCPhys += cbThisPart;
263 pvValue = (uint8_t const *)pvValue + cbThisPart;
264 }
265
266 return rc;
267}
268
269
270
271
272/**
273 * Wrapper which does the write and updates range statistics when such are enabled.
274 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
275 */
276static int iomMMIODoWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
277{
278#ifdef VBOX_WITH_STATISTICS
279 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
280 Assert(pStats);
281#endif
282
283 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
284 VBOXSTRICTRC rc;
285 if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
286 {
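 /* Aligned dword accesses, pass-through ranges and (where the write mode
    allows it) aligned qword accesses go straight to the device callback;
    everything else takes the read-merge-write slow path implemented by
    iomMMIODoComplicatedWrite above. */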
287 if ( (cb == 4 && !(GCPhysFault & 3))
288 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
289 || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
290 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
291 GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
292 else
293 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
294 }
295 else
296 rc = VINF_SUCCESS;
297 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
298 STAM_COUNTER_INC(&pStats->Accesses);
299 return VBOXSTRICTRC_TODO(rc);
300}
301
302
303/**
304 * Deals with complicated MMIO reads.
305 *
306 * Complicated means unaligned or non-dword/qword aligned accesses depending on
307 * the MMIO region's access mode flags.
308 *
309 * @returns Strict VBox status code. Any EM scheduling status code,
310 * VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
311 * VINF_IOM_R3_MMIO_WRITE may be returned.
312 *
313 * @param pVM Pointer to the VM.
314 * @param pRange The range to read from.
315 * @param GCPhys The physical address to start reading.
316 * @param pvValue Where to store the value.
317 * @param cbValue The size of the value to read.
318 */
319static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
320{
321 AssertReturn( (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
322 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
323 VERR_IOM_MMIO_IPE_1);
324 AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
325 RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);
326
327 /*
328 * Do debug stop if requested.
329 */
330 int rc = VINF_SUCCESS; NOREF(pVM);
331#ifdef VBOX_STRICT
332 if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
333 {
334# ifdef IN_RING3
335 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
336 "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
337 if (rc == VERR_DBGF_NOT_ATTACHED)
338 rc = VINF_SUCCESS;
339# else
340 return VINF_IOM_R3_MMIO_READ;
341# endif
342 }
343#endif
344
345 /*
346 * Split and conquer.
347 */
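/* Illustrative example: a 2-byte read at GCPhys = X+1, where X is dword
   aligned, performs one dword read at X; the u32Value >>= 8 below then
   moves the two wanted bytes into the low word before they are copied out. */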
348 for (;;)
349 {
350 /*
351 * Do DWORD read from the device.
352 */
353 uint32_t u32Value;
354 int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
355 GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
356 switch (rc2)
357 {
358 case VINF_SUCCESS:
359 break;
360 case VINF_IOM_MMIO_UNUSED_FF:
361 u32Value = UINT32_C(0xffffffff);
362 break;
363 case VINF_IOM_MMIO_UNUSED_00:
364 u32Value = 0;
365 break;
366 case VINF_IOM_R3_MMIO_READ:
367 case VINF_IOM_R3_MMIO_READ_WRITE:
368 case VINF_IOM_R3_MMIO_WRITE:
369 /** @todo What if we've split a transfer and already read
370 * something? Since reads can have side effects we could be
371 * kind of screwed here... */
372 LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
373 return rc2;
374 default:
375 if (RT_FAILURE(rc2))
376 {
377 Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
378 return rc2;
379 }
380 AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
381 if (rc == VINF_SUCCESS || rc2 < rc)
382 rc = rc2;
383 break;
384 }
385 u32Value >>= (GCPhys & 3) * 8;
386
387 /*
388 * Write what we've read.
389 */
390 unsigned cbThisPart = 4 - (GCPhys & 3);
391 if (cbThisPart > cbValue)
392 cbThisPart = cbValue;
393
394 switch (cbThisPart)
395 {
396 case 1:
397 *(uint8_t *)pvValue = (uint8_t)u32Value;
398 break;
399 case 2:
400 *(uint16_t *)pvValue = (uint16_t)u32Value;
401 break;
402 case 3:
403 ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
404 ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
405 ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
406 break;
407 case 4:
408 *(uint32_t *)pvValue = u32Value;
409 break;
410 }
411
412 /*
413 * Advance.
414 */
415 cbValue -= cbThisPart;
416 if (!cbValue)
417 break;
418 GCPhys += cbThisPart;
419 pvValue = (uint8_t *)pvValue + cbThisPart;
420 }
421
422 return rc;
423}
424
425
426/**
427 * Implements VINF_IOM_MMIO_UNUSED_FF.
428 *
429 * @returns VINF_SUCCESS.
430 * @param pvValue Where to store the 0xff bytes.
431 * @param cbValue How many bytes to read.
432 */
433static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
434{
435 switch (cbValue)
436 {
437 case 1: *(uint8_t *)pvValue = UINT8_C(0xff); break;
438 case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
439 case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
440 case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
441 default:
442 {
443 uint8_t *pb = (uint8_t *)pvValue;
444 while (cbValue--)
445 *pb++ = UINT8_C(0xff);
446 break;
447 }
448 }
449 return VINF_SUCCESS;
450}
451
452
453/**
454 * Implements VINF_IOM_MMIO_UNUSED_00.
455 *
456 * @returns VINF_SUCCESS.
457 * @param pvValue Where to store the zeros.
458 * @param cbValue How many bytes to read.
459 */
460static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
461{
462 switch (cbValue)
463 {
464 case 1: *(uint8_t *)pvValue = UINT8_C(0x00); break;
465 case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
466 case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
467 case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
468 default:
469 {
470 uint8_t *pb = (uint8_t *)pvValue;
471 while (cbValue--)
472 *pb++ = UINT8_C(0x00);
473 break;
474 }
475 }
476 return VINF_SUCCESS;
477}
478
479
480/**
481 * Wrapper which does the read and updates range statistics when such are enabled.
482 */
483DECLINLINE(int) iomMMIODoRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
484{
485#ifdef VBOX_WITH_STATISTICS
486 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
487 Assert(pStats);
488 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
489#endif
490
491 VBOXSTRICTRC rc;
492 if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
493 {
494 if ( (cbValue == 4 && !(GCPhys & 3))
495 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
496 || (cbValue == 8 && !(GCPhys & 7)) )
497 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
498 else
499 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
500 }
501 else
502 rc = VINF_IOM_MMIO_UNUSED_FF;
503 if (rc != VINF_SUCCESS)
504 {
505 switch (VBOXSTRICTRC_VAL(rc))
506 {
507 case VINF_IOM_MMIO_UNUSED_FF: rc = iomMMIODoReadFFs(pvValue, cbValue); break;
508 case VINF_IOM_MMIO_UNUSED_00: rc = iomMMIODoRead00s(pvValue, cbValue); break;
509 }
510 }
511 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
512 STAM_COUNTER_INC(&pStats->Accesses);
513 return VBOXSTRICTRC_VAL(rc);
514}
515
516
517/**
518 * Internal - statistics only.
519 */
520DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
521{
522#ifdef VBOX_WITH_STATISTICS
523 switch (cb)
524 {
525 case 1:
526 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
527 break;
528 case 2:
529 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
530 break;
531 case 4:
532 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
533 break;
534 case 8:
535 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
536 break;
537 default:
538 /* No way. */
539 AssertMsgFailed(("Invalid data length %d\n", cb));
540 break;
541 }
542#else
543 NOREF(pVM); NOREF(cb);
544#endif
545}
546
547
548/**
549 * MOV reg, mem (read)
550 * MOVZX reg, mem (read)
551 * MOVSX reg, mem (read)
552 *
553 * @returns VBox status code.
554 *
555 * @param pVM The virtual machine.
556 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
557 * @param pCpu Disassembler CPU state.
558 * @param pRange Pointer to the MMIO range.
559 * @param GCPhysFault The GC physical address corresponding to pvFault.
560 */
561static int iomInterpretMOVxXRead(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
562{
563 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
564
565 /*
566 * Get the data size from parameter 2,
567 * and call the handler function to get the data.
568 */
569 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
570 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
571
572 uint64_t u64Data = 0;
573 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &u64Data, cb);
574 if (rc == VINF_SUCCESS)
575 {
576 /*
577 * Do sign extension for MOVSX.
578 */
579 /** @todo check up on the MOVSX implementation! */
580 if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
581 {
582 if (cb == 1)
583 {
584 /* DWORD <- BYTE */
585 int64_t iData = (int8_t)u64Data;
586 u64Data = (uint64_t)iData;
587 }
588 else
589 {
590 /* DWORD <- WORD */
591 int64_t iData = (int16_t)u64Data;
592 u64Data = (uint64_t)iData;
593 }
594 }
595
596 /*
597 * Store the result to register (parameter 1).
598 */
599 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
600 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
601 }
602
603 if (rc == VINF_SUCCESS)
604 iomMMIOStatLength(pVM, cb);
605 return rc;
606}
607
608
609/**
610 * MOV mem, reg|imm (write)
611 *
612 * @returns VBox status code.
613 *
614 * @param pVM The virtual machine.
615 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
616 * @param pCpu Disassembler CPU state.
617 * @param pRange Pointer to the MMIO range.
618 * @param GCPhysFault The GC physical address corresponding to pvFault.
619 */
620static int iomInterpretMOVxXWrite(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
621{
622 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
623
624 /*
625 * Get data to write from second parameter,
626 * and call the callback to write it.
627 */
628 unsigned cb = 0;
629 uint64_t u64Data = 0;
630 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
631 AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);
632
633 int rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &u64Data, cb);
634 if (rc == VINF_SUCCESS)
635 iomMMIOStatLength(pVM, cb);
636 return rc;
637}
638
639
640/** Wrapper for reading virtual memory. */
641DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
642{
643 /* Note: This will fail in R0 or RC if it hits an access handler. That
644 isn't a problem though since the operation can be restarted in REM. */
645#ifdef IN_RC
646 NOREF(pVCpu);
647 int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
648 /* Page may be protected and not directly accessible. */
649 if (rc == VERR_ACCESS_DENIED)
650 rc = VINF_IOM_R3_IOPORT_WRITE;
651 return rc;
652#else
653 return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb);
654#endif
655}
656
657
658/** Wrapper for writing virtual memory. */
659DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
660{
661 /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
662 * raw mode code. Some thought needs to be spent on theoretical concurrency issues
663 * as well since we're not behind the pgm lock and handlers may change between calls.
664 *
665 * PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
666 * the state of some shadowed structures. */
667#if defined(IN_RING0) || defined(IN_RC)
668 return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
669#else
670 NOREF(pCtxCore);
671 return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
672#endif
673}
674
675
676#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
677/**
678 * [REP] MOVSB
679 * [REP] MOVSW
680 * [REP] MOVSD
681 *
682 * Restricted implementation.
683 *
684 *
685 * @returns VBox status code.
686 *
687 * @param pVM The virtual machine.
688 * @param fWriteAccess Whether this is a write access.
689 * @param pRegFrame Trap register frame.
690 * @param GCPhysFault The GC physical address corresponding to pvFault.
691 * @param pCpu Disassembler CPU state.
692 * @param pRange Pointer to the MMIO range.
693 * @param ppStat Which sub-sample to attribute this call to.
694 */
695static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
696 PSTAMPROFILE *ppStat)
697{
698 /*
699 * We do not support segment prefixes or REPNE.
700 */
701 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
702 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */
703
704 PVMCPU pVCpu = VMMGetCpu(pVM);
705
706 /*
707 * Get bytes/words/dwords/qword count to copy.
708 */
709 uint32_t cTransfers = 1;
710 if (pCpu->fPrefix & DISPREFIX_REP)
711 {
712#ifndef IN_RC
713 if ( CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
714 && pRegFrame->rcx >= _4G)
715 return VINF_EM_RAW_EMULATE_INSTR;
716#endif
717
718 cTransfers = pRegFrame->ecx;
719 if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
720 cTransfers &= 0xffff;
721
722 if (!cTransfers)
723 return VINF_SUCCESS;
724 }
725
726 /* Get the current privilege level. */
727 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);
728
729 /*
730 * Get data size.
731 */
732 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
733 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
734 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
735
736#ifdef VBOX_WITH_STATISTICS
737 if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
738 pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
739#endif
740
741/** @todo re-evaluate on page boundaries. */
742
743 RTGCPHYS Phys = GCPhysFault;
744 int rc;
745 if (fWriteAccess)
746 {
747 /*
748 * Write operation: [Mem] -> [MMIO]
749 * ds:esi (Virt Src) -> es:edi (Phys Dst)
750 */
751 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });
752
753 /* Check callback. */
754 if (!pRange->CTX_SUFF(pfnWriteCallback))
755 return VINF_IOM_R3_MMIO_WRITE;
756
757 /* Convert source address ds:esi. */
758 RTGCUINTPTR pu8Virt;
759 rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
760 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
761 (PRTGCPTR)&pu8Virt);
762 if (RT_SUCCESS(rc))
763 {
764
765 /* Access verification first; we currently can't recover properly from traps inside this instruction */
766 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
767 if (rc != VINF_SUCCESS)
768 {
769 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
770 return VINF_EM_RAW_EMULATE_INSTR;
771 }
772
773#ifdef IN_RC
774 MMGCRamRegisterTrapHandler(pVM);
775#endif
776
777 /* copy loop. */
778 while (cTransfers)
779 {
780 uint32_t u32Data = 0;
781 rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
782 if (rc != VINF_SUCCESS)
783 break;
784 rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
785 if (rc != VINF_SUCCESS)
786 break;
787
788 pu8Virt += offIncrement;
789 Phys += offIncrement;
790 pRegFrame->rsi += offIncrement;
791 pRegFrame->rdi += offIncrement;
792 cTransfers--;
793 }
794#ifdef IN_RC
795 MMGCRamDeregisterTrapHandler(pVM);
796#endif
797 /* Update ecx. */
798 if (pCpu->fPrefix & DISPREFIX_REP)
799 pRegFrame->ecx = cTransfers;
800 }
801 else
802 rc = VINF_IOM_R3_MMIO_READ_WRITE;
803 }
804 else
805 {
806 /*
807 * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
808 * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
809 */
810 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });
811
812 /* Check callback. */
813 if (!pRange->CTX_SUFF(pfnReadCallback))
814 return VINF_IOM_R3_MMIO_READ;
815
816 /* Convert destination address. */
817 RTGCUINTPTR pu8Virt;
818 rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
819 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
820 (RTGCPTR *)&pu8Virt);
821 if (RT_FAILURE(rc))
822 return VINF_IOM_R3_MMIO_READ;
823
824 /* Check if destination address is MMIO. */
825 PIOMMMIORANGE pMMIODst;
826 RTGCPHYS PhysDst;
827 rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
828 PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
829 if ( RT_SUCCESS(rc)
830 && (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
831 {
832 /** @todo implement per-device locks for MMIO access. */
833 Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));
834
835 /*
836 * Extra: [MMIO] -> [MMIO]
837 */
838 STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
839 if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
840 {
841 iomMmioReleaseRange(pVM, pRange);
842 return VINF_IOM_R3_MMIO_READ_WRITE;
843 }
844
845 /* copy loop. */
846 while (cTransfers)
847 {
848 uint32_t u32Data;
849 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
850 if (rc != VINF_SUCCESS)
851 break;
852 rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
853 if (rc != VINF_SUCCESS)
854 break;
855
856 Phys += offIncrement;
857 PhysDst += offIncrement;
858 pRegFrame->rsi += offIncrement;
859 pRegFrame->rdi += offIncrement;
860 cTransfers--;
861 }
862 iomMmioReleaseRange(pVM, pRange);
863 }
864 else
865 {
866 /*
867 * Normal: [MMIO] -> [Mem]
868 */
869 /* Access verification first; we currently can't recover properly from traps inside this instruction */
870 rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
871 if (rc != VINF_SUCCESS)
872 {
873 Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
874 return VINF_EM_RAW_EMULATE_INSTR;
875 }
876
877 /* copy loop. */
878#ifdef IN_RC
879 MMGCRamRegisterTrapHandler(pVM);
880#endif
881 while (cTransfers)
882 {
883 uint32_t u32Data;
884 rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
885 if (rc != VINF_SUCCESS)
886 break;
887 rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
888 if (rc != VINF_SUCCESS)
889 {
890 Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
891 break;
892 }
893
894 pu8Virt += offIncrement;
895 Phys += offIncrement;
896 pRegFrame->rsi += offIncrement;
897 pRegFrame->rdi += offIncrement;
898 cTransfers--;
899 }
900#ifdef IN_RC
901 MMGCRamDeregisterTrapHandler(pVM);
902#endif
903 }
904
905 /* Update ecx on exit. */
906 if (pCpu->fPrefix & DISPREFIX_REP)
907 pRegFrame->ecx = cTransfers;
908 }
909
910 /* work statistics. */
911 if (rc == VINF_SUCCESS)
912 iomMMIOStatLength(pVM, cb);
913 NOREF(ppStat);
914 return rc;
915}
916#endif /* IOM_WITH_MOVS_SUPPORT */
917
918
919/**
920 * Gets the address / opcode mask corresponding to the given CPU mode.
921 *
922 * @returns Mask.
923 * @param enmCpuMode CPU mode.
924 */
925static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
926{
927 switch (enmCpuMode)
928 {
929 case DISCPUMODE_16BIT: return UINT16_MAX;
930 case DISCPUMODE_32BIT: return UINT32_MAX;
931 case DISCPUMODE_64BIT: return UINT64_MAX;
932 default:
933 AssertFailedReturn(UINT32_MAX);
934 }
935}
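/* Illustrative example: with a 16-bit address size the mask is 0xffff, so the
   string instruction emulators below wrap pointer updates within the low word
   while preserving the upper register bits:
     pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
                    | (pRegFrame->rsi & ~fAddrMask); */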
936
937
938/**
939 * [REP] STOSB
940 * [REP] STOSW
941 * [REP] STOSD
942 *
943 * Restricted implementation.
944 *
945 *
946 * @returns VBox status code.
947 *
948 * @param pVM The virtual machine.
949 * @param pRegFrame Trap register frame.
950 * @param GCPhysFault The GC physical address corresponding to pvFault.
951 * @param pCpu Disassembler CPU state.
952 * @param pRange Pointer to the MMIO range.
953 */
954static int iomInterpretSTOS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
955{
956 /*
957 * We do not support segment prefixes or REPNE.
958 */
959 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
960 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
961
962 /*
963 * Get bytes/words/dwords/qwords count to copy.
964 */
965 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
966 RTGCUINTREG cTransfers = 1;
967 if (pCpu->fPrefix & DISPREFIX_REP)
968 {
969#ifndef IN_RC
970 if ( CPUMIsGuestIn64BitCode(VMMGetCpu(pVM))
971 && pRegFrame->rcx >= _4G)
972 return VINF_EM_RAW_EMULATE_INSTR;
973#endif
974
975 cTransfers = pRegFrame->rcx & fAddrMask;
976 if (!cTransfers)
977 return VINF_SUCCESS;
978 }
979
980/** @todo r=bird: bounds checks! */
981
982 /*
983 * Get data size.
984 */
985 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
986 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
987 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
988
989#ifdef VBOX_WITH_STATISTICS
990 if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
991 pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
992#endif
993
994
995 RTGCPHYS Phys = GCPhysFault;
996 int rc;
997 if ( pRange->CTX_SUFF(pfnFillCallback)
998 && cb <= 4 /* can only fill 32-bit values */)
999 {
1000 /*
1001 * Use the fill callback.
1002 */
1003 /** @todo pfnFillCallback must return number of bytes successfully written!!! */
1004 if (offIncrement > 0)
1005 {
1006 /* addr++ variant. */
1007 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
1008 pRegFrame->eax, cb, cTransfers);
1009 if (rc == VINF_SUCCESS)
1010 {
1011 /* Update registers. */
1012 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1013 | (pRegFrame->rdi & ~fAddrMask);
1014 if (pCpu->fPrefix & DISPREFIX_REP)
1015 pRegFrame->rcx &= ~fAddrMask;
1016 }
1017 }
1018 else
1019 {
1020 /* addr-- variant. */
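 /* Illustrative example: REP STOSD with DF=1, cTransfers=4 and a fault at
    Phys = X+12 stores descending from X+12 down to X, so the ascending
    fill callback is handed the lowest address, Phys - 3*4 = X. */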
1021 rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1022 Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
1023 pRegFrame->eax, cb, cTransfers);
1024 if (rc == VINF_SUCCESS)
1025 {
1026 /* Update registers. */
1027 pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
1028 | (pRegFrame->rdi & ~fAddrMask);
1029 if (pCpu->fPrefix & DISPREFIX_REP)
1030 pRegFrame->rcx &= ~fAddrMask;
1031 }
1032 }
1033 }
1034 else
1035 {
1036 /*
1037 * Use the write callback.
1038 */
1039 Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
1040 uint64_t u64Data = pRegFrame->rax;
1041
1042 /* fill loop. */
1043 do
1044 {
1045 rc = iomMMIODoWrite(pVM, pRange, Phys, &u64Data, cb);
1046 if (rc != VINF_SUCCESS)
1047 break;
1048
1049 Phys += offIncrement;
1050 pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
1051 | (pRegFrame->rdi & ~fAddrMask);
1052 cTransfers--;
1053 } while (cTransfers);
1054
1055 /* Update rcx on exit. */
1056 if (pCpu->fPrefix & DISPREFIX_REP)
1057 pRegFrame->rcx = (cTransfers & fAddrMask)
1058 | (pRegFrame->rcx & ~fAddrMask);
1059 }
1060
1061 /*
1062 * Work statistics and return.
1063 */
1064 if (rc == VINF_SUCCESS)
1065 iomMMIOStatLength(pVM, cb);
1066 return rc;
1067}
1068
1069
1070/**
1071 * [REP] LODSB
1072 * [REP] LODSW
1073 * [REP] LODSD
1074 *
1075 * Restricted implementation.
1076 *
1077 *
1078 * @returns VBox status code.
1079 *
1080 * @param pVM The virtual machine.
1081 * @param pRegFrame Trap register frame.
1082 * @param GCPhysFault The GC physical address corresponding to pvFault.
1083 * @param pCpu Disassembler CPU state.
1084 * @param pRange Pointer to the MMIO range.
1085 */
1086static int iomInterpretLODS(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1087{
1088 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1089
1090 /*
1091 * We do not support segment prefixes or REP*.
1092 */
1093 if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
1094 return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */
1095
1096 /*
1097 * Get data size.
1098 */
1099 unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
1100 AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
1101 int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;
1102
1103 /*
1104 * Perform read.
1105 */
1106 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &pRegFrame->rax, cb);
1107 if (rc == VINF_SUCCESS)
1108 {
1109 uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
1110 pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
1111 | (pRegFrame->rsi & ~fAddrMask);
1112 }
1113
1114 /*
1115 * Work statistics and return.
1116 */
1117 if (rc == VINF_SUCCESS)
1118 iomMMIOStatLength(pVM, cb);
1119 return rc;
1120}
1121
1122
1123/**
1124 * CMP [MMIO], reg|imm
1125 * CMP reg|imm, [MMIO]
1126 *
1127 * Restricted implementation.
1128 *
1129 *
1130 * @returns VBox status code.
1131 *
1132 * @param pVM The virtual machine.
1133 * @param pRegFrame Trap register frame.
1134 * @param GCPhysFault The GC physical address corresponding to pvFault.
1135 * @param pCpu Disassembler CPU state.
1136 * @param pRange Pointer to the MMIO range.
1137 */
1138static int iomInterpretCMP(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1139{
1140 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1141
1142 /*
1143 * Get the operands.
1144 */
1145 unsigned cb = 0;
1146 uint64_t uData1 = 0;
1147 uint64_t uData2 = 0;
1148 int rc;
1149 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1150 /* cmp reg, [MMIO]. */
1151 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1152 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1153 /* cmp [MMIO], reg|imm. */
1154 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1155 else
1156 {
1157 AssertMsgFailed(("Disassember CMP problem..\n"));
1158 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1159 }
1160
1161 if (rc == VINF_SUCCESS)
1162 {
1163#if HC_ARCH_BITS == 32
1164 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1165 if (cb > 4)
1166 return VINF_IOM_R3_MMIO_READ_WRITE;
1167#endif
1168 /* Emulate CMP and update guest flags. */
1169 uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
1170 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1171 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1172 iomMMIOStatLength(pVM, cb);
1173 }
1174
1175 return rc;
1176}
1177
1178
1179/**
1180 * AND [MMIO], reg|imm
1181 * AND reg, [MMIO]
1182 * OR [MMIO], reg|imm
1183 * OR reg, [MMIO]
1184 *
1185 * Restricted implementation.
1186 *
1187 *
1188 * @returns VBox status code.
1189 *
1190 * @param pVM The virtual machine.
1191 * @param pRegFrame Trap register frame.
1192 * @param GCPhysFault The GC physical address corresponding to pvFault.
1193 * @param pCpu Disassembler CPU state.
1194 * @param pRange Pointer to the MMIO range.
1195 * @param pfnEmulate Instruction emulation function.
1196 */
1197static int iomInterpretOrXorAnd(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
1198{
1199 unsigned cb = 0;
1200 uint64_t uData1 = 0;
1201 uint64_t uData2 = 0;
1202 bool fAndWrite;
1203 int rc;
1204
1205#ifdef LOG_ENABLED
1206 const char *pszInstr;
1207
1208 if (pCpu->pCurInstr->uOpcode == OP_XOR)
1209 pszInstr = "Xor";
1210 else if (pCpu->pCurInstr->uOpcode == OP_OR)
1211 pszInstr = "Or";
1212 else if (pCpu->pCurInstr->uOpcode == OP_AND)
1213 pszInstr = "And";
1214 else
1215 pszInstr = "OrXorAnd??";
1216#endif
1217
1218 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1219 {
1220#if HC_ARCH_BITS == 32
1221 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1222 if (cb > 4)
1223 return VINF_IOM_R3_MMIO_READ_WRITE;
1224#endif
1225 /* and reg, [MMIO]. */
1226 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1227 fAndWrite = false;
1228 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1229 }
1230 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1231 {
1232#if HC_ARCH_BITS == 32
1233 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1234 if (cb > 4)
1235 return VINF_IOM_R3_MMIO_READ_WRITE;
1236#endif
1237 /* and [MMIO], reg|imm. */
1238 fAndWrite = true;
1239 if ( (pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3)
1240 && (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
1241 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1242 else
1243 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1244 }
1245 else
1246 {
1247 AssertMsgFailed(("Disassember AND problem..\n"));
1248 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1249 }
1250
1251 if (rc == VINF_SUCCESS)
1252 {
1253 /* Emulate AND and update guest flags. */
1254 uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);
1255
1256 LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));
1257
1258 if (fAndWrite)
1259 /* Store result to MMIO. */
1260 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
1261 else
1262 {
1263 /* Store result to register. */
1264 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
1265 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1266 }
1267 if (rc == VINF_SUCCESS)
1268 {
1269 /* Update guest's eflags and finish. */
1270 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1271 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1272 iomMMIOStatLength(pVM, cb);
1273 }
1274 }
1275
1276 return rc;
1277}
1278
1279
1280/**
1281 * TEST [MMIO], reg|imm
1282 * TEST reg, [MMIO]
1283 *
1284 * Restricted implementation.
1285 *
1286 *
1287 * @returns VBox status code.
1288 *
1289 * @param pVM The virtual machine.
1290 * @param pRegFrame Trap register frame.
1291 * @param GCPhysFault The GC physical address corresponding to pvFault.
1292 * @param pCpu Disassembler CPU state.
1293 * @param pRange Pointer to the MMIO range.
1294 */
1295static int iomInterpretTEST(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1296{
1297 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1298
1299 unsigned cb = 0;
1300 uint64_t uData1 = 0;
1301 uint64_t uData2 = 0;
1302 int rc;
1303
1304 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1305 {
1306 /* test reg, [MMIO]. */
1307 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1308 }
1309 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1310 {
1311 /* test [MMIO], reg|imm. */
1312 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1313 }
1314 else
1315 {
1316 AssertMsgFailed(("Disassember TEST problem..\n"));
1317 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1318 }
1319
1320 if (rc == VINF_SUCCESS)
1321 {
1322#if HC_ARCH_BITS == 32
1323 /* Can't deal with 8 byte operands in our 32-bit emulation code. */
1324 if (cb > 4)
1325 return VINF_IOM_R3_MMIO_READ_WRITE;
1326#endif
1327
1328 /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
1329 uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
1330 pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
1331 | (eflags & (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
1332 iomMMIOStatLength(pVM, cb);
1333 }
1334
1335 return rc;
1336}
1337
1338
1339/**
1340 * BT [MMIO], reg|imm
1341 *
1342 * Restricted implementation.
1343 *
1344 *
1345 * @returns VBox status code.
1346 *
1347 * @param pVM The virtual machine.
1348 * @param pRegFrame Trap register frame.
1349 * @param GCPhysFault The GC physical address corresponding to pvFault.
1350 * @param pCpu Disassembler CPU state.
1351 * @param pRange Pointer to the MMIO range.
1352 */
1353static int iomInterpretBT(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1354{
1355 Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
1356
1357 uint64_t uBit = 0;
1358 uint64_t uData = 0;
1359 unsigned cbIgnored;
1360
1361 if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
1362 {
1363 AssertMsgFailed(("Disassember BT problem..\n"));
1364 return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1365 }
1366 /* Only the size of the memory operand matters here. */
1367 unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);
1368
1369 /* bt [MMIO], reg|imm. */
1370 int rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData, cbData);
1371 if (rc == VINF_SUCCESS)
1372 {
1373 /* Extract the tested bit and store it in the carry flag. */
1374 pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
1375 iomMMIOStatLength(pVM, cbData);
1376 }
1377
1378 return rc;
1379}
1380
1381/**
1382 * XCHG [MMIO], reg
1383 * XCHG reg, [MMIO]
1384 *
1385 * Restricted implementation.
1386 *
1387 *
1388 * @returns VBox status code.
1389 *
1390 * @param pVM The virtual machine.
1391 * @param pRegFrame Trap register frame.
1392 * @param GCPhysFault The GC physical address corresponding to pvFault.
1393 * @param pCpu Disassembler CPU state.
1394 * @param pRange Pointer to the MMIO range.
1395 */
1396static int iomInterpretXCHG(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
1397{
1398 /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
1399 if ( (!pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3)
1400 || (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
1401 return VINF_IOM_R3_MMIO_READ_WRITE;
1402
1403 int rc;
1404 unsigned cb = 0;
1405 uint64_t uData1 = 0;
1406 uint64_t uData2 = 0;
1407 if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
1408 {
1409 /* xchg reg, [MMIO]. */
1410 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData2, cb);
1411 if (rc == VINF_SUCCESS)
1412 {
1413 /* Store result to MMIO. */
1414 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData1, cb);
1415
1416 if (rc == VINF_SUCCESS)
1417 {
1418 /* Store result to register. */
1419 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
1420 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1421 }
1422 else
1423 Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
1424 }
1425 else
1426 Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
1427 }
1428 else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
1429 {
1430 /* xchg [MMIO], reg. */
1431 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, &uData1, cb);
1432 if (rc == VINF_SUCCESS)
1433 {
1434 /* Store result to MMIO. */
1435 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, &uData2, cb);
1436 if (rc == VINF_SUCCESS)
1437 {
1438 /* Store result to register. */
1439 bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
1440 AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
1441 }
1442 else
1443 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE, ("rc=%Rrc\n", rc));
1444 }
1445 else
1446 AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ, ("rc=%Rrc\n", rc));
1447 }
1448 else
1449 {
1450 AssertMsgFailed(("Disassember XCHG problem..\n"));
1451 rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
1452 }
1453 return rc;
1454}
1455
1456
1457/**
1458 * \#PF Handler callback for MMIO ranges.
1459 *
1460 * @returns VBox status code (appropriate for GC return).
1461 * @param pVM Pointer to the VM.
1462 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1463 * any error code (the EPT misconfig hack).
1464 * @param pCtxCore Trap register frame.
1465 * @param GCPhysFault The GC physical address corresponding to pvFault.
1466 * @param pvUser Pointer to the MMIO ring-3 range entry.
1467 */
1468static int iomMMIOHandler(PVM pVM, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
1469{
1470 /* Take the IOM lock before performing any MMIO. */
1471 int rc = IOM_LOCK(pVM);
1472#ifndef IN_RING3
1473 if (rc == VERR_SEM_BUSY)
1474 return VINF_IOM_R3_MMIO_READ_WRITE;
1475#endif
1476 AssertRC(rc);
1477
1478 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1479 Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n",
1480 GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1481
1482 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1483 Assert(pRange);
1484 Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));
1485
1486#ifdef VBOX_WITH_STATISTICS
1487 /*
1488 * Locate the statistics. If the range is larger than PAGE_SIZE we'll use the first byte for everything.
1489 */
1490 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhysFault, pRange);
1491 if (!pStats)
1492 {
1493# ifdef IN_RING3
1494 IOM_UNLOCK(pVM);
1495 return VERR_NO_MEMORY;
1496# else
1497 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1498 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1499 IOM_UNLOCK(pVM);
1500 return VINF_IOM_R3_MMIO_READ_WRITE;
1501# endif
1502 }
1503#endif
1504
1505#ifndef IN_RING3
1506 /*
1507 * Should we defer the request right away? This isn't usually the case, so
1508 * do the simple test first and then try to deal with uErrorCode being N/A.
1509 */
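 /* In other words: defer when the current-context callback for the access
    direction is missing but a ring-3 one exists; when uErrorCode is N/A the
    direction is unknown, so either missing callback triggers the deferral. */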
1510 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1511 || !pRange->CTX_SUFF(pfnReadCallback))
1512 && ( uErrorCode == UINT32_MAX
1513 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1514 : uErrorCode & X86_TRAP_PF_RW
1515 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1516 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1517 )
1518 )
1519 )
1520 {
1521 if (uErrorCode & X86_TRAP_PF_RW)
1522 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1523 else
1524 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1525
1526 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1527 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1528 IOM_UNLOCK(pVM);
1529 return VINF_IOM_R3_MMIO_READ_WRITE;
1530 }
1531#endif /* !IN_RING3 */
1532
1533 /*
1534 * Retain the range and do locking.
1535 */
1536 iomMmioRetainRange(pRange);
1537 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1538 IOM_UNLOCK(pVM);
1539 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1540 if (rc != VINF_SUCCESS)
1541 {
1542 iomMmioReleaseRange(pVM, pRange);
1543 return rc;
1544 }
1545
1546 /*
1547 * Disassemble the instruction and interpret it.
1548 */
1549 PVMCPU pVCpu = VMMGetCpu(pVM);
1550 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1551 unsigned cbOp;
1552 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1553 if (RT_FAILURE(rc))
1554 {
1555 iomMmioReleaseRange(pVM, pRange);
1556 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1557 return rc;
1558 }
1559 switch (pDis->pCurInstr->uOpcode)
1560 {
1561 case OP_MOV:
1562 case OP_MOVZX:
1563 case OP_MOVSX:
1564 {
1565 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1566 AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
1567 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1568 ? uErrorCode & X86_TRAP_PF_RW
1569 : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
1570 rc = iomInterpretMOVxXWrite(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1571 else
1572 rc = iomInterpretMOVxXRead(pVM, pCtxCore, pDis, pRange, GCPhysFault);
1573 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1574 break;
1575 }
1576
1577
1578#ifdef IOM_WITH_MOVS_SUPPORT
1579 case OP_MOVSB:
1580 case OP_MOVSWD:
1581 {
1582 if (uErrorCode == UINT32_MAX)
1583 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1584 else
1585 {
1586 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1587 PSTAMPROFILE pStat = NULL;
1588 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1589 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1590 }
1591 break;
1592 }
1593#endif
1594
1595 case OP_STOSB:
1596 case OP_STOSWD:
1597 Assert(uErrorCode & X86_TRAP_PF_RW);
1598 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1599 rc = iomInterpretSTOS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1600 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1601 break;
1602
1603 case OP_LODSB:
1604 case OP_LODSWD:
1605 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1606 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1607 rc = iomInterpretLODS(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1608 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1609 break;
1610
1611 case OP_CMP:
1612 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1613 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1614 rc = iomInterpretCMP(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1615 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1616 break;
1617
1618 case OP_AND:
1619 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1620 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1621 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1622 break;
1623
1624 case OP_OR:
1625 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1626 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1627 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1628 break;
1629
1630 case OP_XOR:
1631 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1632 rc = iomInterpretOrXorAnd(pVM, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1633 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1634 break;
1635
1636 case OP_TEST:
1637 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1638 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1639 rc = iomInterpretTEST(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1640 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1641 break;
1642
1643 case OP_BT:
1644 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1645 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1646 rc = iomInterpretBT(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1647 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1648 break;
1649
1650 case OP_XCHG:
1651 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1652 rc = iomInterpretXCHG(pVM, pCtxCore, GCPhysFault, pDis, pRange);
1653 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1654 break;
1655
1656
1657 /*
1658 * The instruction isn't supported. Hand it on to ring-3.
1659 */
1660 default:
1661 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1662 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1663 break;
1664 }
1665
1666 /*
1667 * On success advance the instruction pointer.
1668 */
1669 if (rc == VINF_SUCCESS)
1670 pCtxCore->rip += cbOp;
1671 else
1672 {
1673 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1674#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1675 switch (rc)
1676 {
1677 case VINF_IOM_R3_MMIO_READ:
1678 case VINF_IOM_R3_MMIO_READ_WRITE:
1679 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1680 break;
1681 case VINF_IOM_R3_MMIO_WRITE:
1682 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1683 break;
1684 }
1685#endif
1686 }
1687
1688 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1689 iomMmioReleaseRange(pVM, pRange);
1690 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1691 return rc;
1692}
1693
1694/**
1695 * \#PF Handler callback for MMIO ranges.
1696 *
1697 * @returns VBox status code (appropriate for GC return).
1698 * @param pVM Pointer to the VM.
1699 * @param uErrorCode CPU Error code.
1700 * @param pCtxCore Trap register frame.
1701 * @param pvFault The fault address (cr2).
1702 * @param GCPhysFault The GC physical address corresponding to pvFault.
1703 * @param pvUser Pointer to the MMIO ring-3 range entry.
1704 */
1705VMMDECL(int) IOMMMIOHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1706{
1707 LogFlow(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1708 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
1709 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1710 return VBOXSTRICTRC_VAL(rcStrict);
1711}
1712
1713/**
1714 * Physical access handler for MMIO ranges.
1715 *
1716 * @returns VBox status code (appropriate for GC return).
1717 * @param pVM Pointer to the VM.
1718 * @param uErrorCode CPU Error code.
1719 * @param pCtxCore Trap register frame.
1720 * @param GCPhysFault The GC physical address.
1721 */
1722VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1723{
1724 int rc2 = IOM_LOCK(pVM); NOREF(rc2);
1725#ifndef IN_RING3
1726 if (rc2 == VERR_SEM_BUSY)
1727 return VINF_IOM_R3_MMIO_READ_WRITE;
1728#endif
1729 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, iomMmioGetRange(pVM, GCPhysFault));
1730 IOM_UNLOCK(pVM);
1731 return VBOXSTRICTRC_VAL(rcStrict);
1732}
1733
1734
1735#ifdef IN_RING3
1736/**
1737 * \#PF Handler callback for MMIO ranges.
1738 *
1739 * @returns VINF_SUCCESS if the handler has carried out the operation.
1740 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1741 * @param pVM Pointer to the VM.
1742 * @param GCPhysFault The physical address the guest is accessing.
1743 * @param pvPhys The HC mapping of that address.
1744 * @param pvBuf What the guest is reading/writing.
1745 * @param cbBuf How much it's reading/writing.
1746 * @param enmAccessType The access type.
1747 * @param pvUser Pointer to the MMIO range entry.
1748 */
1749DECLCALLBACK(int) IOMR3MMIOHandler(PVM pVM, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
1750 PGMACCESSTYPE enmAccessType, void *pvUser)
1751{
1752 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1753 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1754
1755 AssertMsg(cbBuf == 1 || cbBuf == 2 || cbBuf == 4 || cbBuf == 8, ("%zu\n", cbBuf));
1756 AssertPtr(pRange);
1757 NOREF(pvPhys);
1758
1759 /*
1760 * Validate the range.
1761 */
1762 int rc = IOM_LOCK(pVM);
1763 AssertRC(rc);
1764 Assert(pRange == iomMmioGetRange(pVM, GCPhysFault));
1765
1766 /*
1767 * Perform locking.
1768 */
1769 iomMmioRetainRange(pRange);
1770 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1771 IOM_UNLOCK(pVM);
1772 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1773 if (rc != VINF_SUCCESS)
1774 {
1775 iomMmioReleaseRange(pVM, pRange);
1776 return rc;
1777 }
1778
1779 /*
1780 * Perform the access.
1781 */
1782 if (enmAccessType == PGMACCESSTYPE_READ)
1783 rc = iomMMIODoRead(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1784 else
1785 rc = iomMMIODoWrite(pVM, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1786
1787 AssertRC(rc);
1788 iomMmioReleaseRange(pVM, pRange);
1789 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1790 return rc;
1791}
1792#endif /* IN_RING3 */
1793
1794
1795/**
1796 * Reads a MMIO register.
1797 *
1798 * @returns VBox status code.
1799 *
1800 * @param pVM Pointer to the VM.
1801 * @param GCPhys The physical address to read.
1802 * @param pu32Value Where to store the value read.
1803 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1804 */
1805VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1806{
1807 /* Take the IOM lock before performing any MMIO. */
1808 VBOXSTRICTRC rc = IOM_LOCK(pVM);
1809#ifndef IN_RING3
1810 if (rc == VERR_SEM_BUSY)
1811        return VINF_IOM_R3_MMIO_READ;
1812#endif
1813 AssertRC(VBOXSTRICTRC_VAL(rc));
1814#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1815 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
1816#endif
1817
1818 /*
1819 * Lookup the current context range node and statistics.
1820 */
1821 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1822 if (!pRange)
1823 {
1824 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1825 IOM_UNLOCK(pVM);
1826 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1827 }
1828#ifdef VBOX_WITH_STATISTICS
1829 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1830 if (!pStats)
1831 {
1832 IOM_UNLOCK(pVM);
1833# ifdef IN_RING3
1834 return VERR_NO_MEMORY;
1835# else
1836 return VINF_IOM_R3_MMIO_READ;
1837# endif
1838 }
1839 STAM_COUNTER_INC(&pStats->Accesses);
1840#endif /* VBOX_WITH_STATISTICS */
1841
1842 if (pRange->CTX_SUFF(pfnReadCallback))
1843 {
1844 /*
1845 * Perform locking.
1846 */
1847 iomMmioRetainRange(pRange);
1848 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1849 IOM_UNLOCK(pVM);
1850        rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
1851 if (rc != VINF_SUCCESS)
1852 {
1853 iomMmioReleaseRange(pVM, pRange);
1854 return rc;
1855 }
1856
1857 /*
1858 * Perform the read and deal with the result.
1859 */
1860 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1861 if ( (cbValue == 4 && !(GCPhys & 3))
1862 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
1863 || (cbValue == 8 && !(GCPhys & 7)) )
1864 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
1865 pu32Value, (unsigned)cbValue);
1866 else
1867 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
1868 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1869 switch (VBOXSTRICTRC_VAL(rc))
1870 {
1871 case VINF_SUCCESS:
1872 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1873 iomMmioReleaseRange(pVM, pRange);
1874 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1875 return rc;
1876#ifndef IN_RING3
1877 case VINF_IOM_R3_MMIO_READ:
1878 case VINF_IOM_R3_MMIO_READ_WRITE:
1879 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1880#endif
1881 default:
1882 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1883 iomMmioReleaseRange(pVM, pRange);
1884 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1885 return rc;
1886
1887 case VINF_IOM_MMIO_UNUSED_00:
1888 iomMMIODoRead00s(pu32Value, cbValue);
1889 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1890 iomMmioReleaseRange(pVM, pRange);
1891 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1892 return VINF_SUCCESS;
1893
1894 case VINF_IOM_MMIO_UNUSED_FF:
1895 iomMMIODoReadFFs(pu32Value, cbValue);
1896 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1897 iomMmioReleaseRange(pVM, pRange);
1898 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1899 return VINF_SUCCESS;
1900 }
1901 /* not reached */
1902 }
1903#ifndef IN_RING3
1904 if (pRange->pfnReadCallbackR3)
1905 {
1906 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1907 IOM_UNLOCK(pVM);
1908 return VINF_IOM_R3_MMIO_READ;
1909 }
1910#endif
1911
1912 /*
1913 * Unassigned memory - this is actually not supposed to happen...
1914 */
1915 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1916 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1917 iomMMIODoReadFFs(pu32Value, cbValue);
1918 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1919 IOM_UNLOCK(pVM);
1920 return VINF_SUCCESS;
1921}
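/*
 * Typical use of IOMMMIORead (a sketch; GCPhysMmioBase and the register
 * offset are hypothetical, not defined in this file):
 *
 * @code
 *     uint32_t u32Status = 0;
 *     VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, GCPhysMmioBase + 0x04, &u32Status, sizeof(u32Status));
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // e.g. VINF_IOM_R3_MMIO_READ in R0/RC: redo the access in ring-3.
 * @endcode
 */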
1922
1923
1924/**
1925 * Writes to a MMIO register.
1926 *
1927 * @returns VBox status code.
1928 *
1929 * @param pVM Pointer to the VM.
1930 * @param GCPhys The physical address to write to.
1931 * @param u32Value The value to write.
1932 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
1933 */
1934VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1935{
1936 /* Take the IOM lock before performing any MMIO. */
1937 VBOXSTRICTRC rc = IOM_LOCK(pVM);
1938#ifndef IN_RING3
1939 if (rc == VERR_SEM_BUSY)
1940 return VINF_IOM_R3_MMIO_WRITE;
1941#endif
1942 AssertRC(VBOXSTRICTRC_VAL(rc));
1943#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1944 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
1945#endif
1946
1947 /*
1948 * Lookup the current context range node.
1949 */
1950 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
1951 if (!pRange)
1952 {
1953 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1954 IOM_UNLOCK(pVM);
1955 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1956 }
1957#ifdef VBOX_WITH_STATISTICS
1958 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, GCPhys, pRange);
1959 if (!pStats)
1960 {
1961 IOM_UNLOCK(pVM);
1962# ifdef IN_RING3
1963 return VERR_NO_MEMORY;
1964# else
1965 return VINF_IOM_R3_MMIO_WRITE;
1966# endif
1967 }
1968 STAM_COUNTER_INC(&pStats->Accesses);
1969#endif /* VBOX_WITH_STATISTICS */
1970
1971 if (pRange->CTX_SUFF(pfnWriteCallback))
1972 {
1973 /*
1974 * Perform locking.
1975 */
1976 iomMmioRetainRange(pRange);
1977 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1978 IOM_UNLOCK(pVM);
1979        rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
1980 if (rc != VINF_SUCCESS)
1981 {
1982 iomMmioReleaseRange(pVM, pRange);
1983 return rc;
1984 }
1985
1986 /*
1987 * Perform the write.
1988 */
1989 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
1990 if ( (cbValue == 4 && !(GCPhys & 3))
1991 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
1992 || (cbValue == 8 && !(GCPhys & 7)) )
1993 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
1994 GCPhys, &u32Value, (unsigned)cbValue);
1995 else
1996 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
1997 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
1998#ifndef IN_RING3
1999 if ( rc == VINF_IOM_R3_MMIO_WRITE
2000 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2001 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2002#endif
2003 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2004 iomMmioReleaseRange(pVM, pRange);
2005 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2006 return rc;
2007 }
2008#ifndef IN_RING3
2009 if (pRange->pfnWriteCallbackR3)
2010 {
2011 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2012 IOM_UNLOCK(pVM);
2013 return VINF_IOM_R3_MMIO_WRITE;
2014 }
2015#endif
2016
2017 /*
2018 * No write handler, nothing to do.
2019 */
2020 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2021 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2022 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2023 IOM_UNLOCK(pVM);
2024 return VINF_SUCCESS;
2025}
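/*
 * Typical use of IOMMMIOWrite (sketch; base address, offset and value are
 * hypothetical):
 *
 * @code
 *     VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, GCPhysMmioBase + 0x08, UINT32_C(1), sizeof(uint32_t));
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict; // VINF_IOM_R3_MMIO_WRITE: redo the write in ring-3.
 * @endcode
 */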
2026
2027
2028/**
2029 * [REP*] INSB/INSW/INSD
2030 * ES:EDI,DX[,ECX]
2031 *
2032 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2033 *
2034 * @returns Strict VBox status code. Informational status codes other than the one documented
2035 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2036 * @retval VINF_SUCCESS Success.
2037 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2038 * status code must be passed on to EM.
2039 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2040 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2041 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2042 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2043 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2044 *
2045 * @param pVM The virtual machine.
2046 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2047 * @param uPort IO Port
2048 * @param uPrefix IO instruction prefix
2049 * @param enmAddrMode The address mode.
2050 * @param cbTransfer Size of transfer unit
2051 */
2052VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2053 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2054{
2055 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2056
2057 /*
2058 * We do not support REPNE or decrementing destination
2059 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
2060 */
2061 if ( (uPrefix & DISPREFIX_REPNE)
2062 || pRegFrame->eflags.Bits.u1DF)
2063 return VINF_EM_RAW_EMULATE_INSTR;
2064
2065 PVMCPU pVCpu = VMMGetCpu(pVM);
2066
2067 /*
2068 * Get bytes/words/dwords count to transfer.
2069 */
2070 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2071 RTGCUINTREG cTransfers = 1;
2072 if (uPrefix & DISPREFIX_REP)
2073 {
2074#ifndef IN_RC
2075 if ( CPUMIsGuestIn64BitCode(pVCpu)
2076 && pRegFrame->rcx >= _4G)
2077 return VINF_EM_RAW_EMULATE_INSTR;
2078#endif
2079 cTransfers = pRegFrame->rcx & fAddrMask;
2080 if (!cTransfers)
2081 return VINF_SUCCESS;
2082 }
2083
2084 /* Convert destination address es:edi. */
2085 RTGCPTR GCPtrDst;
2086 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2087 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2088 &GCPtrDst);
2089 if (RT_FAILURE(rc2))
2090 {
2091 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2092 return VINF_EM_RAW_EMULATE_INSTR;
2093 }
2094
2095 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2096 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2097 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2098 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2099 if (rc2 != VINF_SUCCESS)
2100 {
2101 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2102 return VINF_EM_RAW_EMULATE_INSTR;
2103 }
2104
2105 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2106 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2107 if (cTransfers > 1)
2108 {
2109 /* If the device supports string transfers, ask it to do as
2110 * much as it wants. The rest is done with single-word transfers. */
2111 const RTGCUINTREG cTransfersOrg = cTransfers;
2112 rcStrict = IOMIOPortReadString(pVM, uPort, &GCPtrDst, &cTransfers, cbTransfer);
2113 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2114 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2115 | (pRegFrame->rdi & ~fAddrMask);
2116 }
2117
2118#ifdef IN_RC
2119 MMGCRamRegisterTrapHandler(pVM);
2120#endif
2121 while (cTransfers && rcStrict == VINF_SUCCESS)
2122 {
2123 uint32_t u32Value;
2124 rcStrict = IOMIOPortRead(pVM, uPort, &u32Value, cbTransfer);
2125 if (!IOM_SUCCESS(rcStrict))
2126 break;
2127 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2128 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2129 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2130 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2131 | (pRegFrame->rdi & ~fAddrMask);
2132 cTransfers--;
2133 }
2134#ifdef IN_RC
2135 MMGCRamDeregisterTrapHandler(pVM);
2136#endif
2137
2138 /* Update rcx on exit. */
2139 if (uPrefix & DISPREFIX_REP)
2140 pRegFrame->rcx = (cTransfers & fAddrMask)
2141 | (pRegFrame->rcx & ~fAddrMask);
2142
2143 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2144 return rcStrict;
2145}
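/*
 * The masked updates of rdi and rcx above implement address-size
 * wrap-around: only the bits covered by fAddrMask advance, everything
 * above the mask is preserved. Worked example for one 2-byte transfer
 * with 16-bit addressing (fAddrMask == 0xffff) and rdi == 0x1234ffff:
 *
 * @code
 *     // rdi = ((rdi + cbTransfer) & fAddrMask) | (rdi & ~fAddrMask)
 *     //     = (0x12350001 & 0xffff)            | 0x12340000
 *     //     = 0x0001                           | 0x12340000
 *     //     = 0x12340001   (di wraps, the upper bits stay put)
 * @endcode
 */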
2146
2147
2148/**
2149 * [REP*] INSB/INSW/INSD
2150 * ES:EDI,DX[,ECX]
2151 *
2152 * @returns Strict VBox status code. Informational status codes other than the one documented
2153 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2154 * @retval VINF_SUCCESS Success.
2155 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2156 * status code must be passed on to EM.
2157 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2158 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2159 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2160 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2161 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2162 *
2163 * @param pVM The virtual machine.
2164 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2165 * @param pCpu Disassembler CPU state.
2166 */
2167VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2168{
2169 /*
2170 * Get port number directly from the register (no need to bother the
2171 * disassembler). And get the I/O register size from the opcode / prefix.
2172 */
2173 RTIOPORT Port = pRegFrame->edx & 0xffff;
2174 unsigned cb = 0;
2175 if (pCpu->pCurInstr->uOpcode == OP_INSB)
2176 cb = 1;
2177 else
2178        cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32-bit and 64-bit mode */
2179
2180 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2181 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2182 {
2183        AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2184 return rcStrict;
2185 }
2186
2187 return IOMInterpretINSEx(pVM, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2188}
2189
2190
2191/**
2192 * [REP*] OUTSB/OUTSW/OUTSD
2193 * DS:ESI,DX[,ECX]
2194 *
2195 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2196 *
2197 * @returns Strict VBox status code. Informational status codes other than the one documented
2198 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2199 * @retval VINF_SUCCESS Success.
2200 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2201 * status code must be passed on to EM.
2202 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2203 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2204 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2205 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2206 *
2207 * @param pVM The virtual machine.
2208 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2209 * @param uPort IO Port
2210 * @param uPrefix IO instruction prefix
2211 * @param enmAddrMode The address mode.
2212 * @param cbTransfer Size of transfer unit
2213 */
2214VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2215 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2216{
2217 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2218
2219 /*
2220 * We do not support segment prefixes, REPNE or
2221 * decrementing source pointer.
2222 */
2223 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2224 || pRegFrame->eflags.Bits.u1DF)
2225 return VINF_EM_RAW_EMULATE_INSTR;
2226
2227 PVMCPU pVCpu = VMMGetCpu(pVM);
2228
2229 /*
2230 * Get bytes/words/dwords count to transfer.
2231 */
2232 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2233 RTGCUINTREG cTransfers = 1;
2234 if (uPrefix & DISPREFIX_REP)
2235 {
2236#ifndef IN_RC
2237 if ( CPUMIsGuestIn64BitCode(pVCpu)
2238 && pRegFrame->rcx >= _4G)
2239 return VINF_EM_RAW_EMULATE_INSTR;
2240#endif
2241 cTransfers = pRegFrame->rcx & fAddrMask;
2242 if (!cTransfers)
2243 return VINF_SUCCESS;
2244 }
2245
2246 /* Convert source address ds:esi. */
2247 RTGCPTR GCPtrSrc;
2248 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2249 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2250 &GCPtrSrc);
2251 if (RT_FAILURE(rc2))
2252 {
2253 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2254 return VINF_EM_RAW_EMULATE_INSTR;
2255 }
2256
2257 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2258 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2259 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2260 (cpl == 3) ? X86_PTE_US : 0);
2261 if (rc2 != VINF_SUCCESS)
2262 {
2263 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2264 return VINF_EM_RAW_EMULATE_INSTR;
2265 }
2266
2267 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2268 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2269 if (cTransfers > 1)
2270 {
2271 /*
2272 * If the device supports string transfers, ask it to do as
2273 * much as it wants. The rest is done with single-word transfers.
2274 */
2275 const RTGCUINTREG cTransfersOrg = cTransfers;
2276 rcStrict = IOMIOPortWriteString(pVM, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
2277 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2278 pRegFrame->rsi = ((pRegFrame->rsi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2279 | (pRegFrame->rsi & ~fAddrMask);
2280 }
2281
2282#ifdef IN_RC
2283 MMGCRamRegisterTrapHandler(pVM);
2284#endif
2285
2286 while (cTransfers && rcStrict == VINF_SUCCESS)
2287 {
2288 uint32_t u32Value = 0;
2289 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2290 if (rcStrict != VINF_SUCCESS)
2291 break;
2292 rcStrict = IOMIOPortWrite(pVM, uPort, u32Value, cbTransfer);
2293 if (!IOM_SUCCESS(rcStrict))
2294 break;
2295        GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbTransfer);
2296 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2297 | (pRegFrame->rsi & ~fAddrMask);
2298 cTransfers--;
2299 }
2300
2301#ifdef IN_RC
2302 MMGCRamDeregisterTrapHandler(pVM);
2303#endif
2304
2305 /* Update rcx on exit. */
2306 if (uPrefix & DISPREFIX_REP)
2307 pRegFrame->rcx = (cTransfers & fAddrMask)
2308 | (pRegFrame->rcx & ~fAddrMask);
2309
2310 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2311 return rcStrict;
2312}
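/*
 * The rcx update above follows the same masking rule as INS. Worked
 * example with 16-bit addressing (fAddrMask == 0xffff), rcx == 0xdead0005
 * on entry and all five transfers completed (cTransfers == 0):
 *
 * @code
 *     // rcx = (cTransfers & fAddrMask) | (rcx & ~fAddrMask)
 *     //     = 0                        | 0xdead0000
 *     //     = 0xdead0000   (only cx was consumed)
 * @endcode
 */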
2313
2314
2315/**
2316 * [REP*] OUTSB/OUTSW/OUTSD
2317 * DS:ESI,DX[,ECX]
2318 *
2319 * @returns Strict VBox status code. Informational status codes other than the one documented
2320 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2321 * @retval VINF_SUCCESS Success.
2322 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2323 * status code must be passed on to EM.
2324 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2325 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
2326 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2327 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2328 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2329 *
2330 * @param pVM The virtual machine.
2331 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2332 * @param pCpu Disassembler CPU state.
2333 */
2334VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2335{
2336 /*
2337 * Get port number from the first parameter.
2338 * And get the I/O register size from the opcode / prefix.
2339 */
2340 uint64_t Port = 0;
2341 unsigned cb = 0;
2342 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &Port, &cb);
2343 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
2344 if (pCpu->pCurInstr->uOpcode == OP_OUTSB)
2345 cb = 1;
2346 else
2347        cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32-bit and 64-bit mode */
2348
2349 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2350 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2351 {
2352        AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2353 return rcStrict;
2354 }
2355
2356 return IOMInterpretOUTSEx(pVM, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2357}
2358
2359#ifndef IN_RC
2360
2361/**
2362 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2363 *
2364 * (This is a special optimization used by the VGA device.)
2365 *
2366 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2367 * remapping is made.
2368 *
2369 * @param pVM The virtual machine.
2370 * @param GCPhys The address of the MMIO page to be changed.
2371 * @param GCPhysRemapped The address of the MMIO2 page.
2372 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2373 * for the time being.
2374 */
2375VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2376{
2377 /* Currently only called from the VGA device during MMIO. */
2378 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2379 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2380 PVMCPU pVCpu = VMMGetCpu(pVM);
2381
2382 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2383 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2384 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2385 && !HMIsNestedPagingActive(pVM)))
2386 return VINF_SUCCESS; /* ignore */
2387
2388 int rc = IOM_LOCK(pVM);
2389 if (RT_FAILURE(rc))
2390 return VINF_SUCCESS; /* better luck the next time around */
2391
2392 /*
2393 * Lookup the context range node the page belongs to.
2394 */
2395 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, GCPhys);
2396 AssertMsgReturn(pRange,
2397 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2398
2399 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2400 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2401
2402 /*
2403 * Do the aliasing; page align the addresses since PGM is picky.
2404 */
2405 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2406 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2407
2408 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2409
2410 IOM_UNLOCK(pVM);
2411 AssertRCReturn(rc, rc);
2412
2413 /*
2414 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2415 * can simply prefetch it.
2416 *
2417 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2418 */
2419#if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2420# ifdef VBOX_STRICT
2421 uint64_t fFlags;
2422 RTHCPHYS HCPhys;
2423 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2424 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2425# endif
2426#endif
2427 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2428 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2429 return VINF_SUCCESS;
2430}
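/*
 * Sketch of the VGA-style use described above (the addresses are
 * hypothetical): alias one framebuffer page so that further guest
 * accesses hit the MMIO2 backing page directly instead of faulting.
 *
 * @code
 *     int rc = IOMMMIOMapMMIO2Page(pVM, GCPhysVRamMmio, GCPhysVRamMmio2, X86_PTE_RW | X86_PTE_P);
 *     AssertRC(rc); // may be VINF_SUCCESS even when no remapping was made
 * @endcode
 */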
2431
2432
2433/**
2434 * Mapping a HC page in place of an MMIO page for direct access.
2435 *
2436 * (This is a special optimization used by the APIC in the VT-x case.)
2437 *
2438 * @returns VBox status code.
2439 *
2440 * @param pVM Pointer to the VM.
2441 * @param pVCpu Pointer to the VMCPU.
2442 * @param GCPhys The address of the MMIO page to be changed.
2443 * @param HCPhys The address of the host physical page.
2444 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2445 * for the time being.
2446 */
2447VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2448{
2449 /* Currently only called from VT-x code during a page fault. */
2450 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2451
2452 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2453 Assert(HMIsEnabled(pVM));
2454
2455 /*
2456 * Lookup the context range node the page belongs to.
2457 */
2458#ifdef VBOX_STRICT
2459 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2460 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2461 AssertMsgReturn(pRange,
2462 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2463 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2464 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2465#endif
2466
2467 /*
2468 * Do the aliasing; page align the addresses since PGM is picky.
2469 */
2470 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2471 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2472
2473 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2474 AssertRCReturn(rc, rc);
2475
2476 /*
2477 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2478 * can simply prefetch it.
2479 *
2480 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2481 */
2482 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2483 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2484 return VINF_SUCCESS;
2485}
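/*
 * Sketch of the VT-x APIC case mentioned above (names hypothetical): map
 * the host's APIC-access page over the guest APIC base so those accesses
 * stop faulting.
 *
 * @code
 *     int rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
 *     AssertRC(rc);
 * @endcode
 */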
2486
2487
2488/**
2489 * Reset a previously modified MMIO region; restore the access flags.
2490 *
2491 * @returns VBox status code.
2492 *
2493 * @param pVM The virtual machine.
2494 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2495 */
2496VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2497{
2498 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2499
2500 PVMCPU pVCpu = VMMGetCpu(pVM);
2501
2502 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2503 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2504 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2505 && !HMIsNestedPagingActive(pVM)))
2506 return VINF_SUCCESS; /* ignore */
2507
2508 /*
2509 * Lookup the context range node the page belongs to.
2510 */
2511#ifdef VBOX_STRICT
2512 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2513 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, GCPhys);
2514 AssertMsgReturn(pRange,
2515 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2516 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2517 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2518#endif
2519
2520 /*
2521 * Call PGM to do the job.
2522 *
2523 * After the call, all the pages should be non-present... unless there is
2524 * a page pool flush pending (unlikely).
2525 */
2526 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2527 AssertRC(rc);
2528
2529#ifdef VBOX_STRICT
2530 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2531 {
2532 uint32_t cb = pRange->cb;
2533 GCPhys = pRange->GCPhys;
2534 while (cb)
2535 {
2536 uint64_t fFlags;
2537 RTHCPHYS HCPhys;
2538 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2539 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2540 cb -= PAGE_SIZE;
2541 GCPhys += PAGE_SIZE;
2542 }
2543 }
2544#endif
2545 return rc;
2546}
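/*
 * Sketch of the matching reset (hypothetical address): restore the
 * trapping access flags when direct mapping is no longer wanted, e.g. on
 * device reset or when the guest reprograms the region.
 *
 * @code
 *     int rc = IOMMMIOResetRegion(pVM, GCPhysVRamMmio); // any address inside the range works
 *     AssertRC(rc);
 * @endcode
 */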
2547
2548#endif /* !IN_RC */
2549