VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@60066

Last change on this file since 60066 was 56628, checked in by vboxsync, 9 years ago

IEM: Postpone INS memory writes to ring-3 if we hit an access handler. We cannot redo the read, as that would only mess things up. This introduces a new per-CPU forced flag, VMCPU_FF_IEM, which must cause an immediate return to ring-3, where it will be serviced ASAP. IEM will also try to return VINF_EM_RAW_TO_R3 to help make sure we get back to ring-3.
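The flow is easiest to see from the ring-3 side. Below is a minimal sketch (not code from this change) of how a run loop might service the new forced flag, assuming the period's VMCPU_FF_IS_PENDING check macro and a placeholder helper named IEMR3DoPendingCommit():

    /* Sketch only: service the postponed INS commit once back in ring-3.
       IEMR3DoPendingCommit() is an illustrative name, not the actual API. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
    {
        rcStrict = IEMR3DoPendingCommit(pVCpu); /* replays the recorded memory write */
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }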

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 56.8 KB
1/* $Id: IEMAllCImplStrInstr.cpp.h 56628 2015-06-24 19:44:56Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50# define IS_64_BIT_CODE(a_pIemCpu) (true)
51#else
52# error "Bad ADDR_SIZE."
53#endif
54#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
55
56#if ADDR_SIZE == 64 || OP_SIZE == 64
57# define IS_64_BIT_CODE(a_pIemCpu) (true)
58#elif ADDR_SIZE == 32
59# define IS_64_BIT_CODE(a_pIemCpu) ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT)
60#else
61# define IS_64_BIT_CODE(a_pIemCpu) (false)
62#endif
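/*
 * Editor's illustrative note (not part of the original file): this template is
 * textually included once per (OP_SIZE, ADDR_SIZE) combination by the IEM code
 * that needs it, along the lines of:
 *
 *     #define OP_SIZE   16
 *     #define ADDR_SIZE 32
 *     #include "IEMAllCImplStrInstr.cpp.h"
 *
 * so that, for example, RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE)
 * below expands to iemCImpl_repe_cmps_op16_addr32. OP_SIZE and ADDR_SIZE are
 * undefined again at the bottom of this file so the next instantiation starts clean.
 */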
63
64
65/**
66 * Implements 'REPE CMPS'.
67 */
68IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
69{
70 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
71
72 /*
73 * Setup.
74 */
75 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
76 if (uCounterReg == 0)
77 {
78 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
79 return VINF_SUCCESS;
80 }
81
82 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
83 uint64_t uSrc1Base;
84 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
85 if (rcStrict != VINF_SUCCESS)
86 return rcStrict;
87
88 uint64_t uSrc2Base;
89 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
90 if (rcStrict != VINF_SUCCESS)
91 return rcStrict;
92
93 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
94 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
95 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
96 uint32_t uEFlags = pCtx->eflags.u;
97
98 /*
99 * The loop.
100 */
101 do
102 {
103 /*
104 * Do segmentation and virtual page stuff.
105 */
106 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
107 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
108 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
109 if (cLeftSrc1Page > uCounterReg)
110 cLeftSrc1Page = uCounterReg;
111 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
112 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
113
114 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
115 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
116 && ( IS_64_BIT_CODE(pIemCpu)
117 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
118 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
119 && uSrc2AddrReg < pCtx->es.u32Limit
120 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
121 )
122 )
123 {
124 RTGCPHYS GCPhysSrc1Mem;
125 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
126 if (rcStrict != VINF_SUCCESS)
127 return rcStrict;
128
129 RTGCPHYS GCPhysSrc2Mem;
130 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
131 if (rcStrict != VINF_SUCCESS)
132 return rcStrict;
133
134 /*
135 * If we can map the page without trouble, do block processing
136 * until the end of the current page.
137 */
138 PGMPAGEMAPLOCK PgLockSrc2Mem;
139 OP_TYPE const *puSrc2Mem;
140 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
141 if (rcStrict == VINF_SUCCESS)
142 {
143 PGMPAGEMAPLOCK PgLockSrc1Mem;
144 OP_TYPE const *puSrc1Mem;
145 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
146 if (rcStrict == VINF_SUCCESS)
147 {
148 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
149 {
150 /* All matches, only compare the last item to get the right eflags. */
151 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
152 uSrc1AddrReg += cLeftPage * cbIncr;
153 uSrc2AddrReg += cLeftPage * cbIncr;
154 uCounterReg -= cLeftPage;
155 }
156 else
157 {
158 /* Some mismatch, compare each item (and keep volatile
159 memory in mind). */
160 uint32_t off = 0;
161 do
162 {
163 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
164 off++;
165 } while ( off < cLeftPage
166 && (uEFlags & X86_EFL_ZF));
167 uSrc1AddrReg += cbIncr * off;
168 uSrc2AddrReg += cbIncr * off;
169 uCounterReg -= off;
170 }
171
172 /* Update the registers before looping. */
173 pCtx->ADDR_rCX = uCounterReg;
174 pCtx->ADDR_rSI = uSrc1AddrReg;
175 pCtx->ADDR_rDI = uSrc2AddrReg;
176 pCtx->eflags.u = uEFlags;
177
178 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
179 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
180 continue;
181 }
182 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
183 }
184 }
185
186 /*
187 * Fallback - slow processing till the end of the current page.
188 * In the cross page boundary case we will end up here with cLeftPage
189 * as 0; we then execute one round of the loop.
190 */
191 do
192 {
193 OP_TYPE uValue1;
194 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
195 if (rcStrict != VINF_SUCCESS)
196 return rcStrict;
197 OP_TYPE uValue2;
198 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
199 if (rcStrict != VINF_SUCCESS)
200 return rcStrict;
201 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
202
203 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
204 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
205 pCtx->ADDR_rCX = --uCounterReg;
206 pCtx->eflags.u = uEFlags;
207 cLeftPage--;
208 } while ( (int32_t)cLeftPage > 0
209 && (uEFlags & X86_EFL_ZF));
210 } while ( uCounterReg != 0
211 && (uEFlags & X86_EFL_ZF));
212
213 /*
214 * Done.
215 */
216 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
217 return VINF_SUCCESS;
218}
219
220
221/**
222 * Implements 'REPNE CMPS'.
223 */
224IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
225{
226 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
227
228 /*
229 * Setup.
230 */
231 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
232 if (uCounterReg == 0)
233 {
234 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
235 return VINF_SUCCESS;
236 }
237
238 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
239 uint64_t uSrc1Base;
240 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
241 if (rcStrict != VINF_SUCCESS)
242 return rcStrict;
243
244 uint64_t uSrc2Base;
245 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
246 if (rcStrict != VINF_SUCCESS)
247 return rcStrict;
248
249 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
250 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
251 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
252 uint32_t uEFlags = pCtx->eflags.u;
253
254 /*
255 * The loop.
256 */
257 do
258 {
259 /*
260 * Do segmentation and virtual page stuff.
261 */
262 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
263 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
264 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
265 if (cLeftSrc1Page > uCounterReg)
266 cLeftSrc1Page = uCounterReg;
267 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
268 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
269
270 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
271 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
272 && ( IS_64_BIT_CODE(pIemCpu)
273 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
274 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
275 && uSrc2AddrReg < pCtx->es.u32Limit
276 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
277 )
278 )
279 {
280 RTGCPHYS GCPhysSrc1Mem;
281 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
282 if (rcStrict != VINF_SUCCESS)
283 return rcStrict;
284
285 RTGCPHYS GCPhysSrc2Mem;
286 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
287 if (rcStrict != VINF_SUCCESS)
288 return rcStrict;
289
290 /*
291 * If we can map the page without trouble, do block processing
292 * until the end of the current page.
293 */
294 OP_TYPE const *puSrc2Mem;
295 PGMPAGEMAPLOCK PgLockSrc2Mem;
296 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
297 if (rcStrict == VINF_SUCCESS)
298 {
299 OP_TYPE const *puSrc1Mem;
300 PGMPAGEMAPLOCK PgLockSrc1Mem;
301 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
302 if (rcStrict == VINF_SUCCESS)
303 {
304 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
305 {
306 /* All matches, only compare the last item to get the right eflags. */
307 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
308 uSrc1AddrReg += cLeftPage * cbIncr;
309 uSrc2AddrReg += cLeftPage * cbIncr;
310 uCounterReg -= cLeftPage;
311 }
312 else
313 {
314 /* Some mismatch, compare each item (and keep volatile
315 memory in mind). */
316 uint32_t off = 0;
317 do
318 {
319 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
320 off++;
321 } while ( off < cLeftPage
322 && !(uEFlags & X86_EFL_ZF));
323 uSrc1AddrReg += cbIncr * off;
324 uSrc2AddrReg += cbIncr * off;
325 uCounterReg -= off;
326 }
327
328 /* Update the registers before looping. */
329 pCtx->ADDR_rCX = uCounterReg;
330 pCtx->ADDR_rSI = uSrc1AddrReg;
331 pCtx->ADDR_rDI = uSrc2AddrReg;
332 pCtx->eflags.u = uEFlags;
333
334 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
335 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
336 continue;
337 }
338 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
339 }
340 }
341
342 /*
343 * Fallback - slow processing till the end of the current page.
344 * In the cross page boundary case we will end up here with cLeftPage
345 * as 0; we then execute one round of the loop.
346 */
347 do
348 {
349 OP_TYPE uValue1;
350 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
351 if (rcStrict != VINF_SUCCESS)
352 return rcStrict;
353 OP_TYPE uValue2;
354 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
355 if (rcStrict != VINF_SUCCESS)
356 return rcStrict;
357 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
358
359 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
360 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
361 pCtx->ADDR_rCX = --uCounterReg;
362 pCtx->eflags.u = uEFlags;
363 cLeftPage--;
364 } while ( (int32_t)cLeftPage > 0
365 && !(uEFlags & X86_EFL_ZF));
366 } while ( uCounterReg != 0
367 && !(uEFlags & X86_EFL_ZF));
368
369 /*
370 * Done.
371 */
372 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
373 return VINF_SUCCESS;
374}
375
376
377/**
378 * Implements 'REPE SCAS'.
379 */
380IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
381{
382 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
383
384 /*
385 * Setup.
386 */
387 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
388 if (uCounterReg == 0)
389 {
390 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
391 return VINF_SUCCESS;
392 }
393
394 uint64_t uBaseAddr;
395 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
396 if (rcStrict != VINF_SUCCESS)
397 return rcStrict;
398
399 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
400 OP_TYPE const uValueReg = pCtx->OP_rAX;
401 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
402 uint32_t uEFlags = pCtx->eflags.u;
403
404 /*
405 * The loop.
406 */
407 do
408 {
409 /*
410 * Do segmentation and virtual page stuff.
411 */
412 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
413 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
414 if (cLeftPage > uCounterReg)
415 cLeftPage = uCounterReg;
416 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
417 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
418 && ( IS_64_BIT_CODE(pIemCpu)
419 || ( uAddrReg < pCtx->es.u32Limit
420 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
421 )
422 )
423 {
424 RTGCPHYS GCPhysMem;
425 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
426 if (rcStrict != VINF_SUCCESS)
427 return rcStrict;
428
429 /*
430 * If we can map the page without trouble, do block processing
431 * until the end of the current page.
432 */
433 PGMPAGEMAPLOCK PgLockMem;
434 OP_TYPE const *puMem;
435 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 /* Search till we find a mismatching item. */
439 OP_TYPE uTmpValue;
440 bool fQuit;
441 uint32_t i = 0;
442 do
443 {
444 uTmpValue = puMem[i++];
445 fQuit = uTmpValue != uValueReg;
446 } while (i < cLeftPage && !fQuit);
447
448 /* Update the regs. */
449 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
450 pCtx->ADDR_rCX = uCounterReg -= i;
451 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
452 pCtx->eflags.u = uEFlags;
453 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
454 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
455 if (fQuit)
456 break;
457
458
459 /* If unaligned, we drop thru and do the page crossing access
460 below. Otherwise, do the next page. */
461 if (!(uVirtAddr & (OP_SIZE - 1)))
462 continue;
463 if (uCounterReg == 0)
464 break;
465 cLeftPage = 0;
466 }
467 }
468
469 /*
470 * Fallback - slow processing till the end of the current page.
471 * In the cross page boundary case we will end up here with cLeftPage
472 * as 0; we then execute one round of the loop.
473 */
474 do
475 {
476 OP_TYPE uTmpValue;
477 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
478 if (rcStrict != VINF_SUCCESS)
479 return rcStrict;
480 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
481
482 pCtx->ADDR_rDI = uAddrReg += cbIncr;
483 pCtx->ADDR_rCX = --uCounterReg;
484 pCtx->eflags.u = uEFlags;
485 cLeftPage--;
486 } while ( (int32_t)cLeftPage > 0
487 && (uEFlags & X86_EFL_ZF));
488 } while ( uCounterReg != 0
489 && (uEFlags & X86_EFL_ZF));
490
491 /*
492 * Done.
493 */
494 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements 'REPNE SCAS'.
501 */
502IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
503{
504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
505
506 /*
507 * Setup.
508 */
509 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
510 if (uCounterReg == 0)
511 {
512 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
513 return VINF_SUCCESS;
514 }
515
516 uint64_t uBaseAddr;
517 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
518 if (rcStrict != VINF_SUCCESS)
519 return rcStrict;
520
521 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
522 OP_TYPE const uValueReg = pCtx->OP_rAX;
523 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
524 uint32_t uEFlags = pCtx->eflags.u;
525
526 /*
527 * The loop.
528 */
529 do
530 {
531 /*
532 * Do segmentation and virtual page stuff.
533 */
534 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
535 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
536 if (cLeftPage > uCounterReg)
537 cLeftPage = uCounterReg;
538 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
539 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
540 && ( IS_64_BIT_CODE(pIemCpu)
541 || ( uAddrReg < pCtx->es.u32Limit
542 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
543 )
544 )
545 {
546 RTGCPHYS GCPhysMem;
547 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
548 if (rcStrict != VINF_SUCCESS)
549 return rcStrict;
550
551 /*
552 * If we can map the page without trouble, do block processing
553 * until the end of the current page.
554 */
555 PGMPAGEMAPLOCK PgLockMem;
556 OP_TYPE const *puMem;
557 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
558 if (rcStrict == VINF_SUCCESS)
559 {
560 /* Search till we find a matching item. */
561 OP_TYPE uTmpValue;
562 bool fQuit;
563 uint32_t i = 0;
564 do
565 {
566 uTmpValue = puMem[i++];
567 fQuit = uTmpValue == uValueReg;
568 } while (i < cLeftPage && !fQuit);
569
570 /* Update the regs. */
571 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
572 pCtx->ADDR_rCX = uCounterReg -= i;
573 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
574 pCtx->eflags.u = uEFlags;
575 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
576 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
577 if (fQuit)
578 break;
579
580
581 /* If unaligned, we drop thru and do the page crossing access
582 below. Otherwise, do the next page. */
583 if (!(uVirtAddr & (OP_SIZE - 1)))
584 continue;
585 if (uCounterReg == 0)
586 break;
587 cLeftPage = 0;
588 }
589 }
590
591 /*
592 * Fallback - slow processing till the end of the current page.
593 * In the cross page boundary case we will end up here with cLeftPage
594 * as 0; we then execute one round of the loop.
595 */
596 do
597 {
598 OP_TYPE uTmpValue;
599 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
600 if (rcStrict != VINF_SUCCESS)
601 return rcStrict;
602 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
603 pCtx->ADDR_rDI = uAddrReg += cbIncr;
604 pCtx->ADDR_rCX = --uCounterReg;
605 pCtx->eflags.u = uEFlags;
606 cLeftPage--;
607 } while ( (int32_t)cLeftPage > 0
608 && !(uEFlags & X86_EFL_ZF));
609 } while ( uCounterReg != 0
610 && !(uEFlags & X86_EFL_ZF));
611
612 /*
613 * Done.
614 */
615 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
616 return VINF_SUCCESS;
617}
618
619
620
621
622/**
623 * Implements 'REP MOVS'.
624 */
625IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
626{
627 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
628
629 /*
630 * Setup.
631 */
632 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
633 if (uCounterReg == 0)
634 {
635 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
636 return VINF_SUCCESS;
637 }
638
639 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
640 uint64_t uSrcBase;
641 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uSrcBase);
642 if (rcStrict != VINF_SUCCESS)
643 return rcStrict;
644
645 uint64_t uDstBase;
646 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uDstBase);
647 if (rcStrict != VINF_SUCCESS)
648 return rcStrict;
649
650 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
651 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
652 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
653
654 /*
655 * Be careful with handler bypassing.
656 */
657 if (pIemCpu->fBypassHandlers)
658 {
659 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
660 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
661 }
662
663 /*
664 * If we're reading back what we write, we have to let the verification code
665 * know, to prevent a false positive.
666 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
667 */
668#ifdef IEM_VERIFICATION_MODE_FULL
669 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
670 && (cbIncr > 0
671 ? uSrcAddrReg <= uDstAddrReg
672 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
673 : uDstAddrReg <= uSrcAddrReg
674 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
675 pIemCpu->fOverlappingMovs = true;
676#endif
677
678 /*
679 * The loop.
680 */
681 do
682 {
683 /*
684 * Do segmentation and virtual page stuff.
685 */
686 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
687 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
688 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
689 if (cLeftSrcPage > uCounterReg)
690 cLeftSrcPage = uCounterReg;
691 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
692 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
693
694 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
695 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
696 && ( IS_64_BIT_CODE(pIemCpu)
697 || ( uSrcAddrReg < pSrcHid->u32Limit
698 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
699 && uDstAddrReg < pCtx->es.u32Limit
700 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
701 )
702 )
703 {
704 RTGCPHYS GCPhysSrcMem;
705 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
706 if (rcStrict != VINF_SUCCESS)
707 return rcStrict;
708
709 RTGCPHYS GCPhysDstMem;
710 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
711 if (rcStrict != VINF_SUCCESS)
712 return rcStrict;
713
714 /*
715 * If we can map the page without trouble, do block processing
716 * until the end of the current page.
717 */
718 PGMPAGEMAPLOCK PgLockDstMem;
719 OP_TYPE *puDstMem;
720 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
721 if (rcStrict == VINF_SUCCESS)
722 {
723 PGMPAGEMAPLOCK PgLockSrcMem;
724 OP_TYPE const *puSrcMem;
725 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
726 if (rcStrict == VINF_SUCCESS)
727 {
728 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
729 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
730
731 /* Perform the operation exactly (don't use memcpy to avoid
732 having to consider how its implementation would affect
733 any overlapping source and destination area). */
734 OP_TYPE const *puSrcCur = puSrcMem;
735 OP_TYPE *puDstCur = puDstMem;
736 uint32_t cTodo = cLeftPage;
737 while (cTodo-- > 0)
738 *puDstCur++ = *puSrcCur++;
739
740 /* Update the registers. */
741 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
742 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
743 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
744
745 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
746 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
747 continue;
748 }
749 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
750 }
751 }
752
753 /*
754 * Fallback - slow processing till the end of the current page.
755 * In the cross page boundary case we will end up here with cLeftPage
756 * as 0; we then execute one round of the loop.
757 */
758 do
759 {
760 OP_TYPE uValue;
761 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
762 if (rcStrict != VINF_SUCCESS)
763 return rcStrict;
764 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
765 if (rcStrict != VINF_SUCCESS)
766 return rcStrict;
767
768 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
769 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
770 pCtx->ADDR_rCX = --uCounterReg;
771 cLeftPage--;
772 } while ((int32_t)cLeftPage > 0);
773 } while (uCounterReg != 0);
774
775 /*
776 * Done.
777 */
778 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
779 return VINF_SUCCESS;
780}
781
782
783/**
784 * Implements 'REP STOS'.
785 */
786IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
787{
788 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
789
790 /*
791 * Setup.
792 */
793 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
794 if (uCounterReg == 0)
795 {
796 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
797 return VINF_SUCCESS;
798 }
799
800 uint64_t uBaseAddr;
801 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
802 if (rcStrict != VINF_SUCCESS)
803 return rcStrict;
804
805 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
806 OP_TYPE const uValue = pCtx->OP_rAX;
807 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
808
809 /*
810 * Be careful with handler bypassing.
811 */
812 /** @todo Permit doing a page if correctly aligned. */
813 if (pIemCpu->fBypassHandlers)
814 {
815 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
816 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
817 }
818
819 /*
820 * The loop.
821 */
822 do
823 {
824 /*
825 * Do segmentation and virtual page stuff.
826 */
827 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
828 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
829 if (cLeftPage > uCounterReg)
830 cLeftPage = uCounterReg;
831 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
832 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
833 && ( IS_64_BIT_CODE(pIemCpu)
834 || ( uAddrReg < pCtx->es.u32Limit
835 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
836 )
837 )
838 {
839 RTGCPHYS GCPhysMem;
840 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
841 if (rcStrict != VINF_SUCCESS)
842 return rcStrict;
843
844 /*
845 * If we can map the page without trouble, do block processing
846 * until the end of the current page.
847 */
848 PGMPAGEMAPLOCK PgLockMem;
849 OP_TYPE *puMem;
850 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
851 if (rcStrict == VINF_SUCCESS)
852 {
853 /* Update the regs first so we can loop on cLeftPage. */
854 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
855 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
856
857 /* Do the memsetting. */
858#if OP_SIZE == 8
859 memset(puMem, uValue, cLeftPage);
860/*#elif OP_SIZE == 32
861 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
862#else
863 while (cLeftPage-- > 0)
864 *puMem++ = uValue;
865#endif
866
867 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
868
869 /* If unaligned, we drop thru and do the page crossing access
870 below. Otherwise, do the next page. */
871 if (!(uVirtAddr & (OP_SIZE - 1)))
872 continue;
873 if (uCounterReg == 0)
874 break;
875 cLeftPage = 0;
876 }
877 }
878
879 /*
880 * Fallback - slow processing till the end of the current page.
881 * In the cross page boundary case we will end up here with cLeftPage
882 * as 0; we then execute one round of the loop.
883 */
884 do
885 {
886 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
887 if (rcStrict != VINF_SUCCESS)
888 return rcStrict;
889 pCtx->ADDR_rDI = uAddrReg += cbIncr;
890 pCtx->ADDR_rCX = --uCounterReg;
891 cLeftPage--;
892 } while ((int32_t)cLeftPage > 0);
893 } while (uCounterReg != 0);
894
895 /*
896 * Done.
897 */
898 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
899 return VINF_SUCCESS;
900}
901
902
903/**
904 * Implements 'REP LODS'.
905 */
906IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
907{
908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
909
910 /*
911 * Setup.
912 */
913 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
914 if (uCounterReg == 0)
915 {
916 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
917 return VINF_SUCCESS;
918 }
919
920 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
921 uint64_t uBaseAddr;
922 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uBaseAddr);
923 if (rcStrict != VINF_SUCCESS)
924 return rcStrict;
925
926 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
927 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
928
929 /*
930 * The loop.
931 */
932 do
933 {
934 /*
935 * Do segmentation and virtual page stuff.
936 */
937 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
938 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
939 if (cLeftPage > uCounterReg)
940 cLeftPage = uCounterReg;
941 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
942 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
943 && ( IS_64_BIT_CODE(pIemCpu)
944 || ( uAddrReg < pSrcHid->u32Limit
945 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
946 )
947 )
948 {
949 RTGCPHYS GCPhysMem;
950 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
951 if (rcStrict != VINF_SUCCESS)
952 return rcStrict;
953
954 /*
955 * If we can map the page without trouble, we can get away with
956 * just reading the last value on the page.
957 */
958 PGMPAGEMAPLOCK PgLockMem;
959 OP_TYPE const *puMem;
960 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
961 if (rcStrict == VINF_SUCCESS)
962 {
963 /* Only get the last item; the rest doesn't matter in direct access mode. */
964#if OP_SIZE == 32
965 pCtx->rax = puMem[cLeftPage - 1];
966#else
967 pCtx->OP_rAX = puMem[cLeftPage - 1];
968#endif
969 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
970 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
971 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
972
973 /* If unaligned, we drop thru and do the page crossing access
974 below. Otherwise, do the next page. */
975 if (!(uVirtAddr & (OP_SIZE - 1)))
976 continue;
977 if (uCounterReg == 0)
978 break;
979 cLeftPage = 0;
980 }
981 }
982
983 /*
984 * Fallback - slow processing till the end of the current page.
985 * In the cross page boundary case we will end up here with cLeftPage
986 * as 0; we then execute one round of the loop.
987 */
988 do
989 {
990 OP_TYPE uTmpValue;
991 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
992 if (rcStrict != VINF_SUCCESS)
993 return rcStrict;
994#if OP_SIZE == 32
995 pCtx->rax = uTmpValue;
996#else
997 pCtx->OP_rAX = uTmpValue;
998#endif
999 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1000 pCtx->ADDR_rCX = --uCounterReg;
1001 cLeftPage--;
1002 } while ((int32_t)cLeftPage > 0);
1003 if (rcStrict != VINF_SUCCESS)
1004 break;
1005 } while (uCounterReg != 0);
1006
1007 /*
1008 * Done.
1009 */
1010 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1011 return VINF_SUCCESS;
1012}
1013
1014
1015#if OP_SIZE != 64
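/* Editor's note (not in the original file): INS/OUTS have no 64-bit operand-size
   forms - port I/O is at most 32 bits wide - so everything in this block is only
   instantiated for the 8, 16 and 32-bit operand-size templates. */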
1016
1017# if !defined(IN_RING3) && !defined(IEMCIMPL_INS_INLINES)
1018# define IEMCIMPL_INS_INLINES 1
1019
1020/**
1021 * Check if we should postpone committing an INS instruction to ring-3, or if we
1022 * should rather panic.
1023 *
1024 * @returns true if we should postpone it, false if it's better to panic.
1025 * @param rcStrictMem The status code returned by the memory write.
1026 */
1027DECLINLINE(bool) iemCImpl_ins_shouldPostponeCommitToRing3(VBOXSTRICTRC rcStrictMem)
1028{
1029 /*
1030 * The following requires executing the write in ring-3.
1031 * See PGMPhysWrite for status code explanations.
1032 */
1033 if ( rcStrictMem == VINF_IOM_R3_MMIO_WRITE
1034 || rcStrictMem == VINF_IOM_R3_MMIO_READ_WRITE
1035 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR
1036# ifdef IN_RC
1037 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
1038 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
1039 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
1040 || rcStrictMem == VINF_CSAM_PENDING_ACTION
1041 || rcStrictMem == VINF_PATM_CHECK_PATCH_PAGE
1042# endif
1043 )
1044 return true;
1045
1046 /* For the other status codes, the pass-up handling should already have
1047 caught them. So, anything getting down here is a real problem worth
1048 meditating over. */
1049 return false;
1050}
1051
1052
1053/**
1054 * Merges a iemCImpl_ins_shouldPostponeCommitToRing3() status with the I/O port
1055 * status.
1056 *
1057 * @returns status code.
1058 * @param rcStrictPort The status returned by the I/O port read.
1059 * @param rcStrictMem The status code returned by the memory write.
1060 */
1061DECLINLINE(VBOXSTRICTRC) iemCImpl_ins_mergePostponedCommitStatuses(VBOXSTRICTRC rcStrictPort, VBOXSTRICTRC rcStrictMem)
1062{
1063 /* Turns out we don't need a lot of merging, since we'll be redoing the
1064 write anyway. (CSAM, PATM status codes, perhaps, but that's about it.) */
1065 return rcStrictPort == VINF_SUCCESS ? VINF_EM_RAW_TO_R3 : rcStrictPort;
1066}
1067
1068# endif /* !IN_RING3 || !IEMCIMPL_INS_INLINES */
1069
1070
1071/**
1072 * Implements 'INS' (no rep)
1073 */
1074IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1075{
1076 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1077 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1078 VBOXSTRICTRC rcStrict;
1079
1080 /*
1081 * Be careful with handler bypassing.
1082 */
1083 if (pIemCpu->fBypassHandlers)
1084 {
1085 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1086 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1087 }
1088
1089 /*
1090 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1091 * segmentation and finally any #PF due to virtual address translation.
1092 * ASSUMES nothing is read from the I/O port before traps are taken.
1093 */
1094 if (!fIoChecked)
1095 {
1096 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1097 if (rcStrict != VINF_SUCCESS)
1098 return rcStrict;
1099 }
1100
1101 OP_TYPE *puMem;
1102 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1103 if (rcStrict != VINF_SUCCESS)
1104 return rcStrict;
1105
1106 uint32_t u32Value = 0;
1107 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1108 rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8);
1109 else
1110 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1111 if (IOM_SUCCESS(rcStrict))
1112 {
1113 *puMem = (OP_TYPE)u32Value;
1114 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1115 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1116 {
1117 if (!pCtx->eflags.Bits.u1DF)
1118 pCtx->ADDR_rDI += OP_SIZE / 8;
1119 else
1120 pCtx->ADDR_rDI -= OP_SIZE / 8;
1121 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1122 }
1123#ifndef IN_RING3
1124 /* iemMemMap already checked permissions, so these can only be real errors
1125 or access handlers meddling. In the access handler case, we must postpone
1126 committing the instruction to ring-3. */
1127 else if (iemCImpl_ins_shouldPostponeCommitToRing3(rcStrict2))
1128 {
1129 pIemCpu->PendingCommit.cbInstr = cbInstr;
1130 pIemCpu->PendingCommit.uValue = u32Value;
1131 pIemCpu->PendingCommit.enmFn = RT_CONCAT4(IEMCOMMIT_INS_OP,OP_SIZE,_ADDR,ADDR_SIZE);
1132 pIemCpu->cPendingCommit++;
1133 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
1134 Log(("%s: Postponing to ring-3; cbInstr=%#x u32Value=%#x rcStrict2=%Rrc rcStrict=%Rrc\n", __FUNCTION__,
1135 cbInstr, u32Value, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict)));
1136 rcStrict = iemCImpl_ins_mergePostponedCommitStatuses(rcStrict, rcStrict2);
1137 }
1138#endif
1139 else
1140 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), RT_FAILURE_NP(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1141 }
1142 return rcStrict;
1143}
1144
1145
1146# ifdef IN_RING3
1147/**
1148 * Called in ring-3 when raw-mode or ring-0 was forced to return while
1149 * committing the instruction (hit access handler).
1150 */
1151IEM_CIMPL_DEF_0(RT_CONCAT4(iemR3CImpl_commit_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1152{
1153 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1154 VBOXSTRICTRC rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, pCtx->ADDR_rDI, (OP_TYPE)pIemCpu->PendingCommit.uValue);
1155 if (rcStrict == VINF_SUCCESS)
1156 {
1157 if (!pCtx->eflags.Bits.u1DF)
1158 pCtx->ADDR_rDI += OP_SIZE / 8;
1159 else
1160 pCtx->ADDR_rDI -= OP_SIZE / 8;
1161 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1162 }
1163 return rcStrict;
1164}
1165# endif /* IN_RING3 */
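/*
 * Editor's note (not in the original file): the PendingCommit fields filled in
 * by the !IN_RING3 paths above and below (uValue, cbInstr, enmFn = IEMCOMMIT_INS_OP...
 * / IEMCOMMIT_REP_INS_OP...) are what the iemR3CImpl_commit_* workers consume once
 * ring-3 has reacted to VMCPU_FF_IEM; the dispatcher that maps enmFn to these
 * workers lives in the main IEM code, outside this template.
 */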
1166
1167
1168/**
1169 * Implements 'REP INS'.
1170 */
1171IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1172{
1173 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1174 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1175 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1176
1177 /*
1178 * Setup.
1179 */
1180 uint16_t const u16Port = pCtx->dx;
1181 VBOXSTRICTRC rcStrict;
1182 if (!fIoChecked)
1183 {
1184 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1185 if (rcStrict != VINF_SUCCESS)
1186 return rcStrict;
1187 }
1188
1189 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1190 if (uCounterReg == 0)
1191 {
1192 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1193 return VINF_SUCCESS;
1194 }
1195
1196 uint64_t uBaseAddr;
1197 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
1198 if (rcStrict != VINF_SUCCESS)
1199 return rcStrict;
1200
1201 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1202 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1203
1204 /*
1205 * Be careful with handler bypassing.
1206 */
1207 if (pIemCpu->fBypassHandlers)
1208 {
1209 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1210 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1211 }
1212
1213 /*
1214 * The loop.
1215 */
1216 do
1217 {
1218 /*
1219 * Do segmentation and virtual page stuff.
1220 */
1221 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1222 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1223 if (cLeftPage > uCounterReg)
1224 cLeftPage = uCounterReg;
1225 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1226 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1227 && ( IS_64_BIT_CODE(pIemCpu)
1228 || ( uAddrReg < pCtx->es.u32Limit
1229 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
1230 )
1231 && !IEM_VERIFICATION_ENABLED(pIemCpu)
1232 )
1233 {
1234 RTGCPHYS GCPhysMem;
1235 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1236 if (rcStrict != VINF_SUCCESS)
1237 return rcStrict;
1238
1239 /*
1240 * If we can map the page without trouble, use the IOM
1241 * string I/O interface to do the work.
1242 */
1243 PGMPAGEMAPLOCK PgLockMem;
1244 OP_TYPE *puMem;
1245 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1246 if (rcStrict == VINF_SUCCESS)
1247 {
1248 uint32_t cTransfers = cLeftPage;
1249 rcStrict = IOMIOPortReadString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1250
1251 uint32_t cActualTransfers = cLeftPage - cTransfers;
1252 Assert(cActualTransfers <= cLeftPage);
1253 pCtx->ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
1254 pCtx->ADDR_rCX = uCounterReg -= cActualTransfers;
1255 puMem += cActualTransfers;
1256
1257 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1258
1259 if (rcStrict != VINF_SUCCESS)
1260 {
1261 if (IOM_SUCCESS(rcStrict))
1262 {
1263 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1264 if (uCounterReg == 0)
1265 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1266 }
1267 return rcStrict;
1268 }
1269
1270 /* If unaligned, we drop thru and do the page crossing access
1271 below. Otherwise, do the next page. */
1272 if (!(uVirtAddr & (OP_SIZE - 1)))
1273 continue;
1274 if (uCounterReg == 0)
1275 break;
1276 cLeftPage = 0;
1277 }
1278 }
1279
1280 /*
1281 * Fallback - slow processing till the end of the current page.
1282 * In the cross page boundary case we will end up here with cLeftPage
1283 * as 0; we then execute one round of the loop.
1284 *
1285 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1286 * I/O port, otherwise it wouldn't really be restartable.
1287 */
1288 /** @todo investigate what the CPU actually does with \#PF/\#GP
1289 * during INS. */
1290 do
1291 {
1292 OP_TYPE *puMem;
1293 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1294 if (rcStrict != VINF_SUCCESS)
1295 return rcStrict;
1296
1297 uint32_t u32Value = 0;
1298 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1299 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1300 else
1301 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1302 if (!IOM_SUCCESS(rcStrict))
1303 return rcStrict;
1304
1305 *puMem = (OP_TYPE)u32Value;
1306 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1307 if (rcStrict2 == VINF_SUCCESS)
1308 { /* likely */ }
1309#ifndef IN_RING3
1310 /* iemMemMap already checked permissions, so these can only be real errors
1311 or access handlers meddling. In the access handler case, we must postpone
1312 committing the instruction to ring-3. */
1313 else if (iemCImpl_ins_shouldPostponeCommitToRing3(rcStrict2))
1314 {
1315 pIemCpu->PendingCommit.cbInstr = cbInstr;
1316 pIemCpu->PendingCommit.uValue = u32Value;
1317 pIemCpu->PendingCommit.enmFn = RT_CONCAT4(IEMCOMMIT_REP_INS_OP,OP_SIZE,_ADDR,ADDR_SIZE);
1318 pIemCpu->cPendingCommit++;
1319 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
1320 Log(("%s: Postponing to ring-3; cbInstr=%#x u32Value=%#x rcStrict2=%Rrc rcStrict=%Rrc\n", __FUNCTION__,
1321 cbInstr, u32Value, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict)));
1322 return iemCImpl_ins_mergePostponedCommitStatuses(rcStrict, rcStrict2);
1323 }
1324#endif
1325 else
1326 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1327 RT_FAILURE(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1328
1329 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1330 pCtx->ADDR_rCX = --uCounterReg;
1331
1332 cLeftPage--;
1333 if (rcStrict != VINF_SUCCESS)
1334 {
1335 if (uCounterReg == 0)
1336 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1337 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1338 return rcStrict;
1339 }
1340 } while ((int32_t)cLeftPage > 0);
1341 } while (uCounterReg != 0);
1342
1343 /*
1344 * Done.
1345 */
1346 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1347 return VINF_SUCCESS;
1348}
1349
1350# ifdef IN_RING3
1351/**
1352 * Called in ring-3 when raw-mode or ring-0 was forced to return while
1353 * committing the instruction (hit access handler).
1354 */
1355IEM_CIMPL_DEF_0(RT_CONCAT4(iemR3CImpl_commit_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1356{
1357 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1358 VBOXSTRICTRC rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, pCtx->ADDR_rDI, (OP_TYPE)pIemCpu->PendingCommit.uValue);
1359 if (rcStrict == VINF_SUCCESS)
1360 {
1361 if (!pCtx->eflags.Bits.u1DF)
1362 pCtx->ADDR_rDI += OP_SIZE / 8;
1363 else
1364 pCtx->ADDR_rDI -= OP_SIZE / 8;
1365 pCtx->ADDR_rCX -= 1;
1366 if (pCtx->ADDR_rCX == 0)
1367 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1368 }
1369 return rcStrict;
1370}
1371# endif /* IN_RING3 */
1372
1373
1374/**
1375 * Implements 'OUTS' (no rep)
1376 */
1377IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1378{
1379 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1380 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1381 VBOXSTRICTRC rcStrict;
1382
1383 /*
1384 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1385 * segmentation and finally any #PF due to virtual address translation.
1386 * ASSUMES nothing is written to the I/O port before traps are taken.
1387 */
1388 if (!fIoChecked)
1389 {
1390 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1391 if (rcStrict != VINF_SUCCESS)
1392 return rcStrict;
1393 }
1394
1395 OP_TYPE uValue;
1396 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1397 if (rcStrict == VINF_SUCCESS)
1398 {
1399 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1400 rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8);
1401 else
1402 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1403 if (IOM_SUCCESS(rcStrict))
1404 {
1405 if (!pCtx->eflags.Bits.u1DF)
1406 pCtx->ADDR_rSI += OP_SIZE / 8;
1407 else
1408 pCtx->ADDR_rSI -= OP_SIZE / 8;
1409 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1410 if (rcStrict != VINF_SUCCESS)
1411 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1412 }
1413 }
1414 return rcStrict;
1415}
1416
1417
1418/**
1419 * Implements 'REP OUTS'.
1420 */
1421IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1422{
1423 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1424 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1425 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1426
1427 /*
1428 * Setup.
1429 */
1430 uint16_t const u16Port = pCtx->dx;
1431 VBOXSTRICTRC rcStrict;
1432 if (!fIoChecked)
1433 {
1434 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1435 if (rcStrict != VINF_SUCCESS)
1436 return rcStrict;
1437 }
1438
1439 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1440 if (uCounterReg == 0)
1441 {
1442 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1443 return VINF_SUCCESS;
1444 }
1445
1446 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1447 uint64_t uBaseAddr;
1448 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg, &uBaseAddr);
1449 if (rcStrict != VINF_SUCCESS)
1450 return rcStrict;
1451
1452 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1453 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1454
1455 /*
1456 * The loop.
1457 */
1458 do
1459 {
1460 /*
1461 * Do segmentation and virtual page stuff.
1462 */
1463 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1464 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1465 if (cLeftPage > uCounterReg)
1466 cLeftPage = uCounterReg;
1467 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1468 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1469 && ( IS_64_BIT_CODE(pIemCpu)
1470 || ( uAddrReg < pHid->u32Limit
1471 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1472 )
1473 && !IEM_VERIFICATION_ENABLED(pIemCpu)
1474 )
1475 {
1476 RTGCPHYS GCPhysMem;
1477 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1478 if (rcStrict != VINF_SUCCESS)
1479 return rcStrict;
1480
1481 /*
1482 * If we can map the page without trouble, we use the IOM
1483 * string I/O interface to do the job.
1484 */
1485 PGMPAGEMAPLOCK PgLockMem;
1486 OP_TYPE const *puMem;
1487 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1488 if (rcStrict == VINF_SUCCESS)
1489 {
1490 uint32_t cTransfers = cLeftPage;
1491 rcStrict = IOMIOPortWriteString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1492
1493 uint32_t cActualTransfers = cLeftPage - cTransfers;
1494 Assert(cActualTransfers <= cLeftPage);
1495 pCtx->ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
1496 pCtx->ADDR_rCX = uCounterReg -= cActualTransfers;
1497 puMem += cActualTransfers;
1498
1499 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1500
1501 if (rcStrict != VINF_SUCCESS)
1502 {
1503 if (IOM_SUCCESS(rcStrict))
1504 {
1505 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1506 if (uCounterReg == 0)
1507 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1508 }
1509 return rcStrict;
1510 }
1511
1512 /* If unaligned, we drop thru and do the page crossing access
1513 below. Otherwise, do the next page. */
1514 if (!(uVirtAddr & (OP_SIZE - 1)))
1515 continue;
1516 if (uCounterReg == 0)
1517 break;
1518 cLeftPage = 0;
1519 }
1520 }
1521
1522 /*
1523 * Fallback - slow processing till the end of the current page.
1524 * In the cross page boundary case we will end up here with cLeftPage
1525 * as 0; we then execute one round of the loop.
1526 *
1527 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1528 * I/O port, otherwise it wouldn't really be restartable.
1529 */
1530 /** @todo investigate what the CPU actually does with \#PF/\#GP
1531 * during OUTS. */
1532 do
1533 {
1534 OP_TYPE uValue;
1535 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1536 if (rcStrict != VINF_SUCCESS)
1537 return rcStrict;
1538
1539 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1540 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1541 else
1542 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1543 if (IOM_SUCCESS(rcStrict))
1544 {
1545 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1546 pCtx->ADDR_rCX = --uCounterReg;
1547 cLeftPage--;
1548 }
1549 if (rcStrict != VINF_SUCCESS)
1550 {
1551 if (IOM_SUCCESS(rcStrict))
1552 {
1553 if (uCounterReg == 0)
1554 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1555 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1556 }
1557 return rcStrict;
1558 }
1559 } while ((int32_t)cLeftPage > 0);
1560 } while (uCounterReg != 0);
1561
1562 /*
1563 * Done.
1564 */
1565 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1566 return VINF_SUCCESS;
1567}
1568
1569#endif /* OP_SIZE != 64 */
1570
1571
1572#undef OP_rAX
1573#undef OP_SIZE
1574#undef ADDR_SIZE
1575#undef ADDR_rDI
1576#undef ADDR_rSI
1577#undef ADDR_rCX
1578#undef ADDR_rIP
1579#undef ADDR2_TYPE
1580#undef ADDR_TYPE
1581#undef ADDR2_TYPE
1582#undef IS_64_BIT_CODE