VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@60871

Last change on this file since 60871 was 60871, checked in by vboxsync, 9 years ago

IEMAllCImplStrInstr.cpp.h: Check FFs for each page and after each INS/OUTS rep. In raw-mode, temporarily enable interrupts too.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 63.8 KB
1/* $Id: IEMAllCImplStrInstr.cpp.h 60871 2016-05-07 17:51:35Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50# define IS_64_BIT_CODE(a_pIemCpu) (true)
51#else
52# error "Bad ADDR_SIZE."
53#endif
54#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
55
56#if ADDR_SIZE == 64 || OP_SIZE == 64
57# define IS_64_BIT_CODE(a_pIemCpu) (true)
58#elif ADDR_SIZE == 32
59# define IS_64_BIT_CODE(a_pIemCpu) ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT)
60#else
61# define IS_64_BIT_CODE(a_pIemCpu) (false)
62#endif
63
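/* Illustrative expansion (example instantiation chosen for clarity): when this
 * template is included with OP_SIZE=16 and ADDR_SIZE=32, the macros above
 * resolve to
 *      OP_rAX    -> ax         OP_TYPE   -> uint16_t
 *      ADDR_rSI  -> esi        ADDR_rDI  -> edi
 *      ADDR_rCX  -> ecx        ADDR_TYPE -> uint32_t
 *      IS_64_BIT_CODE(pIemCpu) -> ((pIemCpu)->enmCpuMode == IEMMODE_64BIT)
 * so that, for instance, RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE)
 * below names the function iemCImpl_repe_cmps_op16_addr32 and the item fetches
 * go through iemMemFetchDataU16. */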
64/** @def IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
65 * Used in the outer (page-by-page) loop to check for reasons for returning
66 * before completing the instruction. In raw-mode we temporarily enable
67 * interrupts to let the host interrupt us. We cannot let big string operations
68 * hog the CPU, especially not in raw-mode.
69 */
70#ifdef IN_RC
71# define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \
72 do { \
73 if (RT_LIKELY( !VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
74 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
75 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK))) \
76 { \
77 RTCCUINTREG fSavedFlags = ASMGetFlags(); \
78 if (!(fSavedFlags & X86_EFL_IF)) \
79 { \
80 ASMSetFlags(fSavedFlags | X86_EFL_IF); \
81 ASMNopPause(); \
82 ASMSetFlags(fSavedFlags); \
83 } \
84 } \
85 else return VINF_SUCCESS; \
86 } while (0)
87#else
88# define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \
89 do { \
90 if (RT_LIKELY( !VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
91 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
92 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK))) \
93 { /* probable */ } \
94 else return VINF_SUCCESS; \
95 } while (0)
96#endif
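/* Usage sketch, mirroring the outer-loop pattern of the functions below: the
 * check sits at the end of each page-sized round, after the affected guest
 * registers have been written back to pCtx, e.g.
 *
 *      for (;;)
 *      {
 *          ... process at most one page worth of items, updating pCtx ...
 *          if (uCounterReg == 0)
 *              break;
 *          IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
 *      }
 *
 * Returning VINF_SUCCESS without advancing RIP leaves the REP instruction
 * half-done but architecturally consistent, so it is simply restarted and
 * resumes where it left off on the next execution attempt. */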
97
98
99/** @def IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
100 * This is used in some of the inner loops to make sure we're responding quickly
101 * to outside requests. For I/O instructions this also makes absolutely sure we
102 * don't miss out on important stuff that happened while processing a word.
103 */
104#define IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
105 do { \
106 if (RT_LIKELY( ( !VMCPU_FF_IS_PENDING(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
107 && !VM_FF_IS_PENDING(a_pVM, VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK)) \
108 || (a_fExitExpr))) \
109 { /* very likely */ } \
110 else return VINF_SUCCESS; \
111 } while (0)
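/* Usage sketch: in the per-item fallback loops below the exit expression is the
 * instruction's own termination condition, so a pending force flag does not
 * cause an early return when the instruction is about to complete anyway, e.g.
 *
 *      IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
 */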
112
113
114/**
115 * Implements 'REPE CMPS'.
116 */
117IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
118{
119 PVM pVM = IEMCPU_TO_VM(pIemCpu);
120 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
121 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
122
123 /*
124 * Setup.
125 */
126 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
127 if (uCounterReg == 0)
128 {
129 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
130 return VINF_SUCCESS;
131 }
132
133 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
134 uint64_t uSrc1Base;
135 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
136 if (rcStrict != VINF_SUCCESS)
137 return rcStrict;
138
139 uint64_t uSrc2Base;
140 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
141 if (rcStrict != VINF_SUCCESS)
142 return rcStrict;
143
144 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
145 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
146 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
147 uint32_t uEFlags = pCtx->eflags.u;
148
149 /*
150 * The loop.
151 */
152 for (;;)
153 {
154 /*
155 * Do segmentation and virtual page stuff.
156 */
157 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
158 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
159 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
160 if (cLeftSrc1Page > uCounterReg)
161 cLeftSrc1Page = uCounterReg;
162 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
163 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
164
165 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
166 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
167 && ( IS_64_BIT_CODE(pIemCpu)
168 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
169 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
170 && uSrc2AddrReg < pCtx->es.u32Limit
171 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
172 )
173 )
174 {
175 RTGCPHYS GCPhysSrc1Mem;
176 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
177 if (rcStrict != VINF_SUCCESS)
178 return rcStrict;
179
180 RTGCPHYS GCPhysSrc2Mem;
181 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
182 if (rcStrict != VINF_SUCCESS)
183 return rcStrict;
184
185 /*
186 * If we can map the page without trouble, do a block processing
187 * until the end of the current page.
188 */
189 PGMPAGEMAPLOCK PgLockSrc2Mem;
190 OP_TYPE const *puSrc2Mem;
191 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
192 if (rcStrict == VINF_SUCCESS)
193 {
194 PGMPAGEMAPLOCK PgLockSrc1Mem;
195 OP_TYPE const *puSrc1Mem;
196 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
197 if (rcStrict == VINF_SUCCESS)
198 {
199 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
200 {
201 /* All matches, only compare the last item to get the right eflags. */
202 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
203 uSrc1AddrReg += cLeftPage * cbIncr;
204 uSrc2AddrReg += cLeftPage * cbIncr;
205 uCounterReg -= cLeftPage;
206 }
207 else
208 {
209 /* Some mismatch, compare each item (and keep volatile
210 memory in mind). */
211 uint32_t off = 0;
212 do
213 {
214 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
215 off++;
216 } while ( off < cLeftPage
217 && (uEFlags & X86_EFL_ZF));
218 uSrc1AddrReg += cbIncr * off;
219 uSrc2AddrReg += cbIncr * off;
220 uCounterReg -= off;
221 }
222
223 /* Update the registers before looping. */
224 pCtx->ADDR_rCX = uCounterReg;
225 pCtx->ADDR_rSI = uSrc1AddrReg;
226 pCtx->ADDR_rDI = uSrc2AddrReg;
227 pCtx->eflags.u = uEFlags;
228
229 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
230 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
231 if ( uCounterReg == 0
232 || !(uEFlags & X86_EFL_ZF))
233 break;
234 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
235 continue;
236 }
237 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
238 }
239 }
240
241 /*
242 * Fallback - slow processing till the end of the current page.
243 * In the cross-page boundary case we will end up here with cLeftPage
244 * as 0; we then execute a single round of the loop.
245 */
246 do
247 {
248 OP_TYPE uValue1;
249 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
250 if (rcStrict != VINF_SUCCESS)
251 return rcStrict;
252 OP_TYPE uValue2;
253 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
254 if (rcStrict != VINF_SUCCESS)
255 return rcStrict;
256 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
257
258 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
259 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
260 pCtx->ADDR_rCX = --uCounterReg;
261 pCtx->eflags.u = uEFlags;
262 cLeftPage--;
263 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
264 } while ( (int32_t)cLeftPage > 0
265 && (uEFlags & X86_EFL_ZF));
266
267 /*
268 * Next page? Must check for interrupts and stuff here.
269 */
270 if ( uCounterReg == 0
271 || !(uEFlags & X86_EFL_ZF))
272 break;
273 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
274 }
275
276 /*
277 * Done.
278 */
279 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
280 return VINF_SUCCESS;
281}
282
283
284/**
285 * Implements 'REPNE CMPS'.
286 */
287IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
288{
289 PVM pVM = IEMCPU_TO_VM(pIemCpu);
290 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
291 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
292
293 /*
294 * Setup.
295 */
296 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
297 if (uCounterReg == 0)
298 {
299 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
300 return VINF_SUCCESS;
301 }
302
303 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
304 uint64_t uSrc1Base;
305 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
306 if (rcStrict != VINF_SUCCESS)
307 return rcStrict;
308
309 uint64_t uSrc2Base;
310 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
311 if (rcStrict != VINF_SUCCESS)
312 return rcStrict;
313
314 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
315 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
316 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
317 uint32_t uEFlags = pCtx->eflags.u;
318
319 /*
320 * The loop.
321 */
322 for (;;)
323 {
324 /*
325 * Do segmentation and virtual page stuff.
326 */
327 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
328 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
329 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
330 if (cLeftSrc1Page > uCounterReg)
331 cLeftSrc1Page = uCounterReg;
332 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
333 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
334
335 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
336 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
337 && ( IS_64_BIT_CODE(pIemCpu)
338 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
339 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
340 && uSrc2AddrReg < pCtx->es.u32Limit
341 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
342 )
343 )
344 {
345 RTGCPHYS GCPhysSrc1Mem;
346 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
347 if (rcStrict != VINF_SUCCESS)
348 return rcStrict;
349
350 RTGCPHYS GCPhysSrc2Mem;
351 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
352 if (rcStrict != VINF_SUCCESS)
353 return rcStrict;
354
355 /*
356 * If we can map the page without trouble, do a block processing
357 * until the end of the current page.
358 */
359 OP_TYPE const *puSrc2Mem;
360 PGMPAGEMAPLOCK PgLockSrc2Mem;
361 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
362 if (rcStrict == VINF_SUCCESS)
363 {
364 OP_TYPE const *puSrc1Mem;
365 PGMPAGEMAPLOCK PgLockSrc1Mem;
366 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
367 if (rcStrict == VINF_SUCCESS)
368 {
369 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
370 {
371 /* All matches, only compare the last item to get the right eflags. */
372 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
373 uSrc1AddrReg += cLeftPage * cbIncr;
374 uSrc2AddrReg += cLeftPage * cbIncr;
375 uCounterReg -= cLeftPage;
376 }
377 else
378 {
379 /* Some mismatch, compare each item (and keep volatile
380 memory in mind). */
381 uint32_t off = 0;
382 do
383 {
384 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
385 off++;
386 } while ( off < cLeftPage
387 && !(uEFlags & X86_EFL_ZF));
388 uSrc1AddrReg += cbIncr * off;
389 uSrc2AddrReg += cbIncr * off;
390 uCounterReg -= off;
391 }
392
393 /* Update the registers before looping. */
394 pCtx->ADDR_rCX = uCounterReg;
395 pCtx->ADDR_rSI = uSrc1AddrReg;
396 pCtx->ADDR_rDI = uSrc2AddrReg;
397 pCtx->eflags.u = uEFlags;
398
399 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
400 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
401 if ( uCounterReg == 0
402 || (uEFlags & X86_EFL_ZF))
403 break;
404 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
405 continue;
406 }
407 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
408 }
409 }
410
411 /*
412 * Fallback - slow processing till the end of the current page.
413 * In the cross-page boundary case we will end up here with cLeftPage
414 * as 0; we then execute a single round of the loop.
415 */
416 do
417 {
418 OP_TYPE uValue1;
419 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
420 if (rcStrict != VINF_SUCCESS)
421 return rcStrict;
422 OP_TYPE uValue2;
423 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
424 if (rcStrict != VINF_SUCCESS)
425 return rcStrict;
426 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
427
428 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
429 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
430 pCtx->ADDR_rCX = --uCounterReg;
431 pCtx->eflags.u = uEFlags;
432 cLeftPage--;
433 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
434 } while ( (int32_t)cLeftPage > 0
435 && !(uEFlags & X86_EFL_ZF));
436
437 /*
438 * Next page? Must check for interrupts and stuff here.
439 */
440 if ( uCounterReg == 0
441 || (uEFlags & X86_EFL_ZF))
442 break;
443 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
444 }
445
446 /*
447 * Done.
448 */
449 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
450 return VINF_SUCCESS;
451}
452
453
454/**
455 * Implements 'REPE SCAS'.
456 */
457IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
458{
459 PVM pVM = IEMCPU_TO_VM(pIemCpu);
460 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
461 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
462
463 /*
464 * Setup.
465 */
466 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
467 if (uCounterReg == 0)
468 {
469 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
470 return VINF_SUCCESS;
471 }
472
473 uint64_t uBaseAddr;
474 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
475 if (rcStrict != VINF_SUCCESS)
476 return rcStrict;
477
478 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
479 OP_TYPE const uValueReg = pCtx->OP_rAX;
480 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
481 uint32_t uEFlags = pCtx->eflags.u;
482
483 /*
484 * The loop.
485 */
486 for (;;)
487 {
488 /*
489 * Do segmentation and virtual page stuff.
490 */
491 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
492 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
493 if (cLeftPage > uCounterReg)
494 cLeftPage = uCounterReg;
495 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
496 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
497 && ( IS_64_BIT_CODE(pIemCpu)
498 || ( uAddrReg < pCtx->es.u32Limit
499 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
500 )
501 )
502 {
503 RTGCPHYS GCPhysMem;
504 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
505 if (rcStrict != VINF_SUCCESS)
506 return rcStrict;
507
508 /*
509 * If we can map the page without trouble, do a block processing
510 * until the end of the current page.
511 */
512 PGMPAGEMAPLOCK PgLockMem;
513 OP_TYPE const *puMem;
514 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
515 if (rcStrict == VINF_SUCCESS)
516 {
517 /* Search till we find a mismatching item. */
518 OP_TYPE uTmpValue;
519 bool fQuit;
520 uint32_t i = 0;
521 do
522 {
523 uTmpValue = puMem[i++];
524 fQuit = uTmpValue != uValueReg;
525 } while (i < cLeftPage && !fQuit);
526
527 /* Update the regs. */
528 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
529 pCtx->ADDR_rCX = uCounterReg -= i;
530 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
531 pCtx->eflags.u = uEFlags;
532 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
533 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
534 if ( fQuit
535 || uCounterReg == 0)
536 break;
537
538 /* If unaligned, we drop thru and do the page crossing access
539 below. Otherwise, do the next page. */
540 if (!(uVirtAddr & (OP_SIZE - 1)))
541 {
542 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
543 continue;
544 }
545 cLeftPage = 0;
546 }
547 }
548
549 /*
550 * Fallback - slow processing till the end of the current page.
551 * In the cross-page boundary case we will end up here with cLeftPage
552 * as 0; we then execute a single round of the loop.
553 */
554 do
555 {
556 OP_TYPE uTmpValue;
557 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
558 if (rcStrict != VINF_SUCCESS)
559 return rcStrict;
560 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
561
562 pCtx->ADDR_rDI = uAddrReg += cbIncr;
563 pCtx->ADDR_rCX = --uCounterReg;
564 pCtx->eflags.u = uEFlags;
565 cLeftPage--;
566 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
567 } while ( (int32_t)cLeftPage > 0
568 && (uEFlags & X86_EFL_ZF));
569
570 /*
571 * Next page? Must check for interrupts and stuff here.
572 */
573 if ( uCounterReg == 0
574 || !(uEFlags & X86_EFL_ZF))
575 break;
576 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
577 }
578
579 /*
580 * Done.
581 */
582 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
583 return VINF_SUCCESS;
584}
585
586
587/**
588 * Implements 'REPNE SCAS'.
589 */
590IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
591{
592 PVM pVM = IEMCPU_TO_VM(pIemCpu);
593 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
594 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
595
596 /*
597 * Setup.
598 */
599 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
600 if (uCounterReg == 0)
601 {
602 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
603 return VINF_SUCCESS;
604 }
605
606 uint64_t uBaseAddr;
607 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
608 if (rcStrict != VINF_SUCCESS)
609 return rcStrict;
610
611 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
612 OP_TYPE const uValueReg = pCtx->OP_rAX;
613 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
614 uint32_t uEFlags = pCtx->eflags.u;
615
616 /*
617 * The loop.
618 */
619 for (;;)
620 {
621 /*
622 * Do segmentation and virtual page stuff.
623 */
624 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
625 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
626 if (cLeftPage > uCounterReg)
627 cLeftPage = uCounterReg;
628 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
629 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
630 && ( IS_64_BIT_CODE(pIemCpu)
631 || ( uAddrReg < pCtx->es.u32Limit
632 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
633 )
634 )
635 {
636 RTGCPHYS GCPhysMem;
637 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
638 if (rcStrict != VINF_SUCCESS)
639 return rcStrict;
640
641 /*
642 * If we can map the page without trouble, do a block processing
643 * until the end of the current page.
644 */
645 PGMPAGEMAPLOCK PgLockMem;
646 OP_TYPE const *puMem;
647 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
648 if (rcStrict == VINF_SUCCESS)
649 {
650 /* Search till we find a mismatching item. */
651 OP_TYPE uTmpValue;
652 bool fQuit;
653 uint32_t i = 0;
654 do
655 {
656 uTmpValue = puMem[i++];
657 fQuit = uTmpValue == uValueReg;
658 } while (i < cLeftPage && !fQuit);
659
660 /* Update the regs. */
661 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
662 pCtx->ADDR_rCX = uCounterReg -= i;
663 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
664 pCtx->eflags.u = uEFlags;
665 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
666 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
667 if ( fQuit
668 || uCounterReg == 0)
669 break;
670
671 /* If unaligned, we drop thru and do the page crossing access
672 below. Otherwise, do the next page. */
673 if (!(uVirtAddr & (OP_SIZE - 1)))
674 {
675 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
676 continue;
677 }
678 cLeftPage = 0;
679 }
680 }
681
682 /*
683 * Fallback - slow processing till the end of the current page.
684 * In the cross-page boundary case we will end up here with cLeftPage
685 * as 0; we then execute a single round of the loop.
686 */
687 do
688 {
689 OP_TYPE uTmpValue;
690 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
691 if (rcStrict != VINF_SUCCESS)
692 return rcStrict;
693 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
694 pCtx->ADDR_rDI = uAddrReg += cbIncr;
695 pCtx->ADDR_rCX = --uCounterReg;
696 pCtx->eflags.u = uEFlags;
697 cLeftPage--;
698 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
699 } while ( (int32_t)cLeftPage > 0
700 && !(uEFlags & X86_EFL_ZF));
701
702 /*
703 * Next page? Must check for interrupts and stuff here.
704 */
705 if ( uCounterReg == 0
706 || (uEFlags & X86_EFL_ZF))
707 break;
708 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
709 }
710
711 /*
712 * Done.
713 */
714 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
715 return VINF_SUCCESS;
716}
717
718
719
720
721/**
722 * Implements 'REP MOVS'.
723 */
724IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
725{
726 PVM pVM = IEMCPU_TO_VM(pIemCpu);
727 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
728 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
729
730 /*
731 * Setup.
732 */
733 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
734 if (uCounterReg == 0)
735 {
736 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
737 return VINF_SUCCESS;
738 }
739
740 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
741 uint64_t uSrcBase;
742 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uSrcBase);
743 if (rcStrict != VINF_SUCCESS)
744 return rcStrict;
745
746 uint64_t uDstBase;
747 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uDstBase);
748 if (rcStrict != VINF_SUCCESS)
749 return rcStrict;
750
751 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
752 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
753 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
754
755 /*
756 * Be careful with handle bypassing.
757 */
758 if (pIemCpu->fBypassHandlers)
759 {
760 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
761 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
762 }
763
764 /*
765 * If we're reading back what we write, we have to let the verification code
766 * know, to prevent a false positive.
767 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
768 */
769#ifdef IEM_VERIFICATION_MODE_FULL
770 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
771 && (cbIncr > 0
772 ? uSrcAddrReg <= uDstAddrReg
773 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
774 : uDstAddrReg <= uSrcAddrReg
775 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
776 pIemCpu->fOverlappingMovs = true;
777#endif
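 /* Worked example of the overlap check above (hypothetical values): with
 cbIncr = +2 (word sized, DF clear), uSrcAddrReg = 0x1000, uDstAddrReg = 0x1004
 and uCounterReg = 8, the source range [0x1000, 0x1010) starts at or below the
 destination and 0x1000 + 2*8 > 0x1004, so fOverlappingMovs gets set and the
 verification code can suppress the false positive described above. */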
778
779 /*
780 * The loop.
781 */
782 for (;;)
783 {
784 /*
785 * Do segmentation and virtual page stuff.
786 */
787 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
788 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
789 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
790 if (cLeftSrcPage > uCounterReg)
791 cLeftSrcPage = uCounterReg;
792 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
793 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
794
795 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
796 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
797 && ( IS_64_BIT_CODE(pIemCpu)
798 || ( uSrcAddrReg < pSrcHid->u32Limit
799 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
800 && uDstAddrReg < pCtx->es.u32Limit
801 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
802 )
803 )
804 {
805 RTGCPHYS GCPhysSrcMem;
806 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
807 if (rcStrict != VINF_SUCCESS)
808 return rcStrict;
809
810 RTGCPHYS GCPhysDstMem;
811 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
812 if (rcStrict != VINF_SUCCESS)
813 return rcStrict;
814
815 /*
816 * If we can map the page without trouble, do a block processing
817 * until the end of the current page.
818 */
819 PGMPAGEMAPLOCK PgLockDstMem;
820 OP_TYPE *puDstMem;
821 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
822 if (rcStrict == VINF_SUCCESS)
823 {
824 PGMPAGEMAPLOCK PgLockSrcMem;
825 OP_TYPE const *puSrcMem;
826 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
827 if (rcStrict == VINF_SUCCESS)
828 {
829 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
830 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
831
832 /* Perform the operation exactly (don't use memcpy to avoid
833 having to consider how its implementation would affect
834 any overlapping source and destination area). */
835 OP_TYPE const *puSrcCur = puSrcMem;
836 OP_TYPE *puDstCur = puDstMem;
837 uint32_t cTodo = cLeftPage;
838 while (cTodo-- > 0)
839 *puDstCur++ = *puSrcCur++;
840
841 /* Update the registers. */
842 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
843 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
844 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
845
846 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
847 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
848
849 if (uCounterReg == 0)
850 break;
851 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
852 continue;
853 }
854 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
855 }
856 }
857
858 /*
859 * Fallback - slow processing till the end of the current page.
860 * In the cross-page boundary case we will end up here with cLeftPage
861 * as 0; we then execute a single round of the loop.
862 */
863 do
864 {
865 OP_TYPE uValue;
866 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
867 if (rcStrict != VINF_SUCCESS)
868 return rcStrict;
869 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
870 if (rcStrict != VINF_SUCCESS)
871 return rcStrict;
872
873 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
874 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
875 pCtx->ADDR_rCX = --uCounterReg;
876 cLeftPage--;
877 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
878 } while ((int32_t)cLeftPage > 0);
879
880 /*
881 * Next page. Must check for interrupts and stuff here.
882 */
883 if (uCounterReg == 0)
884 break;
885 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
886 }
887
888 /*
889 * Done.
890 */
891 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
892 return VINF_SUCCESS;
893}
894
895
896/**
897 * Implements 'REP STOS'.
898 */
899IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
900{
901 PVM pVM = IEMCPU_TO_VM(pIemCpu);
902 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
903 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
904
905 /*
906 * Setup.
907 */
908 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
909 if (uCounterReg == 0)
910 {
911 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
912 return VINF_SUCCESS;
913 }
914
915 uint64_t uBaseAddr;
916 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
917 if (rcStrict != VINF_SUCCESS)
918 return rcStrict;
919
920 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
921 OP_TYPE const uValue = pCtx->OP_rAX;
922 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
923
924 /*
925 * Be careful with handle bypassing.
926 */
927 /** @todo Permit doing a page if correctly aligned. */
928 if (pIemCpu->fBypassHandlers)
929 {
930 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
931 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
932 }
933
934 /*
935 * The loop.
936 */
937 for (;;)
938 {
939 /*
940 * Do segmentation and virtual page stuff.
941 */
942 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
943 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
944 if (cLeftPage > uCounterReg)
945 cLeftPage = uCounterReg;
946 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
947 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
948 && ( IS_64_BIT_CODE(pIemCpu)
949 || ( uAddrReg < pCtx->es.u32Limit
950 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
951 )
952 )
953 {
954 RTGCPHYS GCPhysMem;
955 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
956 if (rcStrict != VINF_SUCCESS)
957 return rcStrict;
958
959 /*
960 * If we can map the page without trouble, do a block processing
961 * until the end of the current page.
962 */
963 PGMPAGEMAPLOCK PgLockMem;
964 OP_TYPE *puMem;
965 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
966 if (rcStrict == VINF_SUCCESS)
967 {
968 /* Update the regs first so we can loop on cLeftPage. */
969 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
970 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
971
972 /* Do the memsetting. */
973#if OP_SIZE == 8
974 memset(puMem, uValue, cLeftPage);
975/*#elif OP_SIZE == 32
976 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
977#else
978 while (cLeftPage-- > 0)
979 *puMem++ = uValue;
980#endif
981
982 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
983
984 if (uCounterReg == 0)
985 break;
986
987 /* If unaligned, we drop thru and do the page crossing access
988 below. Otherwise, do the next page. */
989 if (!(uVirtAddr & (OP_SIZE - 1)))
990 {
991 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
992 continue;
993 }
994 cLeftPage = 0;
995 }
996 }
997
998 /*
999 * Fallback - slow processing till the end of the current page.
1000 * In the cross-page boundary case we will end up here with cLeftPage
1001 * as 0; we then execute a single round of the loop.
1002 */
1003 do
1004 {
1005 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
1006 if (rcStrict != VINF_SUCCESS)
1007 return rcStrict;
1008 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1009 pCtx->ADDR_rCX = --uCounterReg;
1010 cLeftPage--;
1011 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
1012 } while ((int32_t)cLeftPage > 0);
1013
1014 /*
1015 * Next page. Must check for interrupts and stuff here.
1016 */
1017 if (uCounterReg == 0)
1018 break;
1019 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
1020 }
1021
1022 /*
1023 * Done.
1024 */
1025 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1026 return VINF_SUCCESS;
1027}
1028
1029
1030/**
1031 * Implements 'REP LODS'.
1032 */
1033IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
1034{
1035 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1036 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1037 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1038
1039 /*
1040 * Setup.
1041 */
1042 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1043 if (uCounterReg == 0)
1044 {
1045 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1046 return VINF_SUCCESS;
1047 }
1048
1049 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
1050 uint64_t uBaseAddr;
1051 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uBaseAddr);
1052 if (rcStrict != VINF_SUCCESS)
1053 return rcStrict;
1054
1055 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1056 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1057
1058 /*
1059 * The loop.
1060 */
1061 for (;;)
1062 {
1063 /*
1064 * Do segmentation and virtual page stuff.
1065 */
1066 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1067 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1068 if (cLeftPage > uCounterReg)
1069 cLeftPage = uCounterReg;
1070 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1071 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1072 && ( IS_64_BIT_CODE(pIemCpu)
1073 || ( uAddrReg < pSrcHid->u32Limit
1074 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
1075 )
1076 )
1077 {
1078 RTGCPHYS GCPhysMem;
1079 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1080 if (rcStrict != VINF_SUCCESS)
1081 return rcStrict;
1082
1083 /*
1084 * If we can map the page without trouble, we can get away with
1085 * just reading the last value on the page.
1086 */
1087 PGMPAGEMAPLOCK PgLockMem;
1088 OP_TYPE const *puMem;
1089 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1090 if (rcStrict == VINF_SUCCESS)
1091 {
1092 /* Only get the last byte, the rest doesn't matter in direct access mode. */
1093#if OP_SIZE == 32
1094 pCtx->rax = puMem[cLeftPage - 1];
1095#else
1096 pCtx->OP_rAX = puMem[cLeftPage - 1];
1097#endif
1098 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
1099 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
1100 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1101
1102 if (uCounterReg == 0)
1103 break;
1104
1105 /* If unaligned, we drop thru and do the page crossing access
1106 below. Otherwise, do the next page. */
1107 if (!(uVirtAddr & (OP_SIZE - 1)))
1108 {
1109 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
1110 continue;
1111 }
1112 cLeftPage = 0;
1113 }
1114 }
1115
1116 /*
1117 * Fallback - slow processing till the end of the current page.
1118 * In the cross-page boundary case we will end up here with cLeftPage
1119 * as 0; we then execute a single round of the loop.
1120 */
1121 do
1122 {
1123 OP_TYPE uTmpValue;
1124 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
1125 if (rcStrict != VINF_SUCCESS)
1126 return rcStrict;
1127#if OP_SIZE == 32
1128 pCtx->rax = uTmpValue;
1129#else
1130 pCtx->OP_rAX = uTmpValue;
1131#endif
1132 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1133 pCtx->ADDR_rCX = --uCounterReg;
1134 cLeftPage--;
1135 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
1136 } while ((int32_t)cLeftPage > 0);
1137
1138 if (rcStrict != VINF_SUCCESS)
1139 break;
1140
1141 /*
1142 * Next page. Must check for interrupts and stuff here.
1143 */
1144 if (uCounterReg == 0)
1145 break;
1146 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
1147 }
1148
1149 /*
1150 * Done.
1151 */
1152 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1153 return VINF_SUCCESS;
1154}
1155
1156
1157#if OP_SIZE != 64
1158
1159# if !defined(IN_RING3) && !defined(IEMCIMPL_INS_INLINES)
1160# define IEMCIMPL_INS_INLINES 1
1161
1162/**
1163 * Check if we should postpone committing an INS instruction to ring-3, or if we
1164 * should rather panic.
1165 *
1166 * @returns true if we should postpone it, false if it's better to panic.
1167 * @param rcStrictMem The status code returned by the memory write.
1168 */
1169DECLINLINE(bool) iemCImpl_ins_shouldPostponeCommitToRing3(VBOXSTRICTRC rcStrictMem)
1170{
1171 /*
1172 * The following requires executing the write in ring-3.
1173 * See PGMPhysWrite for status code explanations.
1174 */
1175 if ( rcStrictMem == VINF_IOM_R3_MMIO_WRITE
1176 || rcStrictMem == VINF_IOM_R3_MMIO_READ_WRITE
1177 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR
1178# ifdef IN_RC
1179 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
1180 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
1181 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
1182 || rcStrictMem == VINF_CSAM_PENDING_ACTION
1183 || rcStrictMem == VINF_PATM_CHECK_PATCH_PAGE
1184# endif
1185 )
1186 return true;
1187
1188 /* For the other status codes, the pass-up handling should already have
1189 caught them. So, anything getting down here is a real problem worth
1190 meditating over. */
1191 return false;
1192}
1193
1194
1195/**
1196 * Merges an iemCImpl_ins_shouldPostponeCommitToRing3() status with the I/O port
1197 * status.
1198 *
1199 * @returns status code.
1200 * @param rcStrictPort The status returned by the I/O port read.
1201 * @param rcStrictMem The status code returned by the memory write.
1202 */
1203DECLINLINE(VBOXSTRICTRC) iemCImpl_ins_mergePostponedCommitStatuses(VBOXSTRICTRC rcStrictPort, VBOXSTRICTRC rcStrictMem)
1204{
1205 /* Turns out we don't need a lot of merging, since we'll be redoing the
1206 write anyway. (CSAM, PATM status codes, perhaps, but that's about it.) */
1207 return rcStrictPort == VINF_SUCCESS ? VINF_EM_RAW_TO_R3 : rcStrictPort;
1208}
1209
1210# endif /* !IN_RING3 || !IEMCIMPL_INS_INLINES */
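/* Flow sketch (summarizing the code below): when the memory commit of an INS
 * hits an access handler in ring-0 or raw-mode, the emulation stashes
 * { cbInstr, uValue, enmFn } in pIemCpu->PendingCommit, sets VMCPU_FF_IEM and
 * returns to ring-3, where the matching iemR3CImpl_commit_(rep_)ins_op*_addr*
 * worker redoes the store and only then advances RIP (and, for REP INS,
 * decrements rCX). */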
1211
1212
1213/**
1214 * Implements 'INS' (no rep)
1215 */
1216IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1217{
1218 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1219 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1220 VBOXSTRICTRC rcStrict;
1221
1222 /*
1223 * Be careful with handle bypassing.
1224 */
1225 if (pIemCpu->fBypassHandlers)
1226 {
1227 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1228 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1229 }
1230
1231 /*
1232 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1233 * segmentation and finally any #PF due to virtual address translation.
1234 * ASSUMES nothing is read from the I/O port before traps are taken.
1235 */
1236 if (!fIoChecked)
1237 {
1238 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1239 if (rcStrict != VINF_SUCCESS)
1240 return rcStrict;
1241 }
1242
1243 OP_TYPE *puMem;
1244 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1245 if (rcStrict != VINF_SUCCESS)
1246 return rcStrict;
1247
1248 uint32_t u32Value = 0;
1249 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1250 rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8);
1251 else
1252 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1253 if (IOM_SUCCESS(rcStrict))
1254 {
1255 *puMem = (OP_TYPE)u32Value;
1256 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1257 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1258 {
1259 if (!pCtx->eflags.Bits.u1DF)
1260 pCtx->ADDR_rDI += OP_SIZE / 8;
1261 else
1262 pCtx->ADDR_rDI -= OP_SIZE / 8;
1263 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1264 }
1265#ifndef IN_RING3
1266 /* iemMemMap already checked permissions, so this may only be real errors
1267 or access handlers meddling. In the access handler case, we must postpone
1268 committing the instruction to ring-3. */
1269 else if (iemCImpl_ins_shouldPostponeCommitToRing3(rcStrict2))
1270 {
1271 pIemCpu->PendingCommit.cbInstr = cbInstr;
1272 pIemCpu->PendingCommit.uValue = u32Value;
1273 pIemCpu->PendingCommit.enmFn = RT_CONCAT4(IEMCOMMIT_INS_OP,OP_SIZE,_ADDR,ADDR_SIZE);
1274 pIemCpu->cPendingCommit++;
1275 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
1276 Log(("%s: Postponing to ring-3; cbInstr=%#x u32Value=%#x rcStrict2=%Rrc rcStrict=%Rrc\n", __FUNCTION__,
1277 cbInstr, u32Value, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict)));
1278 rcStrict = iemCImpl_ins_mergePostponedCommitStatuses(rcStrict, rcStrict2);
1279 }
1280#endif
1281 else
1282 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), RT_FAILURE_NP(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1283 }
1284 return rcStrict;
1285}
1286
1287
1288# ifdef IN_RING3
1289/**
1290 * Called in ring-3 when raw-mode or ring-0 was forced to return while
1291 * committing the instruction (hit access handler).
1292 */
1293IEM_CIMPL_DEF_0(RT_CONCAT4(iemR3CImpl_commit_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1294{
1295 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1296 VBOXSTRICTRC rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, pCtx->ADDR_rDI, (OP_TYPE)pIemCpu->PendingCommit.uValue);
1297 if (rcStrict == VINF_SUCCESS)
1298 {
1299 if (!pCtx->eflags.Bits.u1DF)
1300 pCtx->ADDR_rDI += OP_SIZE / 8;
1301 else
1302 pCtx->ADDR_rDI -= OP_SIZE / 8;
1303 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1304 }
1305 return rcStrict;
1306}
1307# endif /* IN_RING3 */
1308
1309
1310/**
1311 * Implements 'REP INS'.
1312 */
1313IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1314{
1315 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1316 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1317 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1318
1319 /*
1320 * Setup.
1321 */
1322 uint16_t const u16Port = pCtx->dx;
1323 VBOXSTRICTRC rcStrict;
1324 if (!fIoChecked)
1325 {
1326 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1327 if (rcStrict != VINF_SUCCESS)
1328 return rcStrict;
1329 }
1330
1331 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1332 if (uCounterReg == 0)
1333 {
1334 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1335 return VINF_SUCCESS;
1336 }
1337
1338 uint64_t uBaseAddr;
1339 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
1340 if (rcStrict != VINF_SUCCESS)
1341 return rcStrict;
1342
1343 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1344 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1345
1346 /*
1347 * Be careful with handle bypassing.
1348 */
1349 if (pIemCpu->fBypassHandlers)
1350 {
1351 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1352 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1353 }
1354
1355 /*
1356 * The loop.
1357 */
1358 for (;;)
1359 {
1360 /*
1361 * Do segmentation and virtual page stuff.
1362 */
1363 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1364 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1365 if (cLeftPage > uCounterReg)
1366 cLeftPage = uCounterReg;
1367 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1368 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1369 && ( IS_64_BIT_CODE(pIemCpu)
1370 || ( uAddrReg < pCtx->es.u32Limit
1371 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
1372 )
1373 && !IEM_VERIFICATION_ENABLED(pIemCpu)
1374 )
1375 {
1376 RTGCPHYS GCPhysMem;
1377 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1378 if (rcStrict != VINF_SUCCESS)
1379 return rcStrict;
1380
1381 /*
1382 * If we can map the page without trouble, use the IOM
1383 * string I/O interface to do the work.
1384 */
1385 PGMPAGEMAPLOCK PgLockMem;
1386 OP_TYPE *puMem;
1387 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1388 if (rcStrict == VINF_SUCCESS)
1389 {
1390 uint32_t cTransfers = cLeftPage;
1391 rcStrict = IOMIOPortReadString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1392
1393 uint32_t cActualTransfers = cLeftPage - cTransfers;
1394 Assert(cActualTransfers <= cLeftPage);
1395 pCtx->ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
1396 pCtx->ADDR_rCX = uCounterReg -= cActualTransfers;
1397 puMem += cActualTransfers;
1398
1399 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1400
1401 if (rcStrict != VINF_SUCCESS)
1402 {
1403 if (IOM_SUCCESS(rcStrict))
1404 {
1405 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1406 if (uCounterReg == 0)
1407 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1408 }
1409 return rcStrict;
1410 }
1411
1412 /* If unaligned, we drop thru and do the page crossing access
1413 below. Otherwise, do the next page. */
1414 if (uCounterReg == 0)
1415 break;
1416 if (!(uVirtAddr & (OP_SIZE - 1)))
1417 {
1418 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
1419 continue;
1420 }
1421 cLeftPage = 0;
1422 }
1423 }
1424
1425 /*
1426 * Fallback - slow processing till the end of the current page.
1427 * In the cross-page boundary case we will end up here with cLeftPage
1428 * as 0; we then execute a single round of the loop.
1429 *
1430 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1431 * I/O port, otherwise it wouldn't really be restartable.
1432 */
1433 /** @todo investigate what the CPU actually does with \#PF/\#GP
1434 * during INS. */
1435 do
1436 {
1437 OP_TYPE *puMem;
1438 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1439 if (rcStrict != VINF_SUCCESS)
1440 return rcStrict;
1441
1442 uint32_t u32Value = 0;
1443 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1444 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1445 else
1446 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1447 if (!IOM_SUCCESS(rcStrict))
1448 return rcStrict;
1449
1450 *puMem = (OP_TYPE)u32Value;
1451 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1452 if (rcStrict2 == VINF_SUCCESS)
1453 { /* likely */ }
1454#ifndef IN_RING3
1455 /* iemMemMap already checked permissions, so this may only be real errors
1456 or access handlers meddling. In the access handler case, we must postpone
1457 committing the instruction to ring-3. */
1458 else if (iemCImpl_ins_shouldPostponeCommitToRing3(rcStrict2))
1459 {
1460 pIemCpu->PendingCommit.cbInstr = cbInstr;
1461 pIemCpu->PendingCommit.uValue = u32Value;
1462 pIemCpu->PendingCommit.enmFn = RT_CONCAT4(IEMCOMMIT_REP_INS_OP,OP_SIZE,_ADDR,ADDR_SIZE);
1463 pIemCpu->cPendingCommit++;
1464 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
1465 Log(("%s: Postponing to ring-3; cbInstr=%#x u32Value=%#x rcStrict2=%Rrc rcStrict=%Rrc\n", __FUNCTION__,
1466 cbInstr, u32Value, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict)));
1467 return iemCImpl_ins_mergePostponedCommitStatuses(rcStrict, rcStrict2);
1468 }
1469#endif
1470 else
1471 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1472 RT_FAILURE(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1473
1474 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1475 pCtx->ADDR_rCX = --uCounterReg;
1476
1477 cLeftPage--;
1478 if (rcStrict != VINF_SUCCESS)
1479 {
1480 if (uCounterReg == 0)
1481 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1482 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1483 return rcStrict;
1484 }
1485 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1486 } while ((int32_t)cLeftPage > 0);
1487
1488
1489 /*
1490 * Next page. Must check for interrupts and stuff here.
1491 */
1492 if (uCounterReg == 0)
1493 break;
1494 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
1495 }
1496
1497 /*
1498 * Done.
1499 */
1500 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1501 return VINF_SUCCESS;
1502}
1503
1504# ifdef IN_RING3
1505/**
1506 * Called in ring-3 when raw-mode or ring-0 was forced to return while
1507 * committing the instruction (hit access handler).
1508 */
1509IEM_CIMPL_DEF_0(RT_CONCAT4(iemR3CImpl_commit_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1510{
1511 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1512 VBOXSTRICTRC rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, pCtx->ADDR_rDI, (OP_TYPE)pIemCpu->PendingCommit.uValue);
1513 if (rcStrict == VINF_SUCCESS)
1514 {
1515 if (!pCtx->eflags.Bits.u1DF)
1516 pCtx->ADDR_rDI += OP_SIZE / 8;
1517 else
1518 pCtx->ADDR_rDI -= OP_SIZE / 8;
1519 pCtx->ADDR_rCX -= 1;
1520 if (pCtx->ADDR_rCX == 0)
1521 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1522 }
1523 return rcStrict;
1524}
1525# endif /* IN_RING3 */
1526
1527
1528/**
1529 * Implements 'OUTS' (no rep)
1530 */
1531IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1532{
1533 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1534 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1535 VBOXSTRICTRC rcStrict;
1536
1537 /*
1538 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1539 * segmentation and finally any #PF due to virtual address translation.
1540 * ASSUMES nothing is read from the I/O port before traps are taken.
1541 */
1542 if (!fIoChecked)
1543 {
1544 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1545 if (rcStrict != VINF_SUCCESS)
1546 return rcStrict;
1547 }
1548
1549 OP_TYPE uValue;
1550 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1551 if (rcStrict == VINF_SUCCESS)
1552 {
1553 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1554 rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8);
1555 else
1556 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1557 if (IOM_SUCCESS(rcStrict))
1558 {
1559 if (!pCtx->eflags.Bits.u1DF)
1560 pCtx->ADDR_rSI += OP_SIZE / 8;
1561 else
1562 pCtx->ADDR_rSI -= OP_SIZE / 8;
1563 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1564 if (rcStrict != VINF_SUCCESS)
1565 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1566 }
1567 }
1568 return rcStrict;
1569}
1570
1571
1572/**
1573 * Implements 'REP OUTS'.
1574 */
1575IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1576{
1577 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1578 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1579 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1580
1581 /*
1582 * Setup.
1583 */
1584 uint16_t const u16Port = pCtx->dx;
1585 VBOXSTRICTRC rcStrict;
1586 if (!fIoChecked)
1587 {
1588 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1589 if (rcStrict != VINF_SUCCESS)
1590 return rcStrict;
1591 }
1592
1593 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1594 if (uCounterReg == 0)
1595 {
1596 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1597 return VINF_SUCCESS;
1598 }
1599
1600 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1601 uint64_t uBaseAddr;
1602 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg, &uBaseAddr);
1603 if (rcStrict != VINF_SUCCESS)
1604 return rcStrict;
1605
1606 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1607 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1608
1609 /*
1610 * The loop.
1611 */
1612 for (;;)
1613 {
1614 /*
1615 * Do segmentation and virtual page stuff.
1616 */
1617 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1618 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1619 if (cLeftPage > uCounterReg)
1620 cLeftPage = uCounterReg;
1621 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1622 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1623 && ( IS_64_BIT_CODE(pIemCpu)
1624 || ( uAddrReg < pHid->u32Limit
1625 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1626 )
1627 && !IEM_VERIFICATION_ENABLED(pIemCpu)
1628 )
1629 {
1630 RTGCPHYS GCPhysMem;
1631 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1632 if (rcStrict != VINF_SUCCESS)
1633 return rcStrict;
1634
1635 /*
1636 * If we can map the page without trouble, we use the IOM
1637 * string I/O interface to do the job.
1638 */
1639 PGMPAGEMAPLOCK PgLockMem;
1640 OP_TYPE const *puMem;
1641 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1642 if (rcStrict == VINF_SUCCESS)
1643 {
1644 uint32_t cTransfers = cLeftPage;
1645 rcStrict = IOMIOPortWriteString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1646
1647 uint32_t cActualTransfers = cLeftPage - cTransfers;
1648 Assert(cActualTransfers <= cLeftPage);
1649 pCtx->ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
1650 pCtx->ADDR_rCX = uCounterReg -= cActualTransfers;
1651 puMem += cActualTransfers;
1652
1653 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1654
1655 if (rcStrict != VINF_SUCCESS)
1656 {
1657 if (IOM_SUCCESS(rcStrict))
1658 {
1659 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1660 if (uCounterReg == 0)
1661 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1662 }
1663 return rcStrict;
1664 }
1665
1666 if (uCounterReg == 0)
1667 break;
1668
1669 /* If unaligned, we drop thru and do the page crossing access
1670 below. Otherwise, do the next page. */
1671 if (!(uVirtAddr & (OP_SIZE - 1)))
1672 {
1673 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
1674 continue;
1675 }
1676 cLeftPage = 0;
1677 }
1678 }
1679
1680 /*
1681 * Fallback - slow processing till the end of the current page.
1682 * In the cross-page boundary case we will end up here with cLeftPage
1683 * as 0; we then execute a single round of the loop.
1684 *
1685 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1686 * I/O port, otherwise it wouldn't really be restartable.
1687 */
1688 /** @todo investigate what the CPU actually does with \#PF/\#GP
1689 * during OUTS. */
1690 do
1691 {
1692 OP_TYPE uValue;
1693 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1694 if (rcStrict != VINF_SUCCESS)
1695 return rcStrict;
1696
1697 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1698 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1699 else
1700 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1701 if (IOM_SUCCESS(rcStrict))
1702 {
1703 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1704 pCtx->ADDR_rCX = --uCounterReg;
1705 cLeftPage--;
1706 }
1707 if (rcStrict != VINF_SUCCESS)
1708 {
1709 if (IOM_SUCCESS(rcStrict))
1710 {
1711 if (uCounterReg == 0)
1712 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1713 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1714 }
1715 return rcStrict;
1716 }
1717 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1718 } while ((int32_t)cLeftPage > 0);
1719
1720
1721 /*
1722 * Next page. Must check for interrupts and stuff here.
1723 */
1724 if (uCounterReg == 0)
1725 break;
1726 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pCtx->eflags.u);
1727 }
1728
1729 /*
1730 * Done.
1731 */
1732 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1733 return VINF_SUCCESS;
1734}
1735
1736#endif /* OP_SIZE != 64-bit */
1737
1738
1739#undef OP_rAX
1740#undef OP_SIZE
1741#undef ADDR_SIZE
1742#undef ADDR_rDI
1743#undef ADDR_rSI
1744#undef ADDR_rCX
1745#undef ADDR_rIP
1746#undef ADDR2_TYPE
1747#undef ADDR_TYPE
1748#undef ADDR2_TYPE
1749#undef IS_64_BIT_CODE
1750#undef IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
1751#undef IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1752