VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@100743

Last change on this file since 100743 was 100052, checked in by vboxsync, 18 months ago

VMM/IEM: Refactored the enmCpuMode, uCpl, fBypassHandlers, fDisregardLock and fPendingInstruction* IEMCPU members into a single fExec member and associated IEM_F_XXX flag defines. Added more flags needed for recompiled execution. The fExec value is maintained as code is executed, so it does not need to be recalculated in the instruction loops. bugref:10369

1/* $Id: IEMAllCImplStrInstr.cpp.h 100052 2023-06-02 14:49:14Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*******************************************************************************
30* Defined Constants And Macros *
31*******************************************************************************/
32#if OP_SIZE == 8
33# define OP_rAX al
34#elif OP_SIZE == 16
35# define OP_rAX ax
36#elif OP_SIZE == 32
37# define OP_rAX eax
38#elif OP_SIZE == 64
39# define OP_rAX rax
40#else
41# error "Bad OP_SIZE."
42#endif
43#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
44
45#if ADDR_SIZE == 16
46# define ADDR_rDI di
47# define ADDR_rSI si
48# define ADDR_rCX cx
49# define ADDR2_TYPE uint32_t
50# define ADDR_VMXSTRIO 0
51#elif ADDR_SIZE == 32
52# define ADDR_rDI edi
53# define ADDR_rSI esi
54# define ADDR_rCX ecx
55# define ADDR2_TYPE uint32_t
56# define ADDR_VMXSTRIO 1
57#elif ADDR_SIZE == 64
58# define ADDR_rDI rdi
59# define ADDR_rSI rsi
60# define ADDR_rCX rcx
61# define ADDR2_TYPE uint64_t
62# define ADDR_VMXSTRIO 2
63# define IS_64_BIT_CODE(a_pVCpu) (true)
64#else
65# error "Bad ADDR_SIZE."
66#endif
67#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
68
69#if ADDR_SIZE == 64 || OP_SIZE == 64
70# define IS_64_BIT_CODE(a_pVCpu) (true)
71#elif ADDR_SIZE == 32
72# define IS_64_BIT_CODE(a_pVCpu) IEM_IS_64BIT_CODE(a_pVCpu)
73#else
74# define IS_64_BIT_CODE(a_pVCpu) (false)
75#endif
76
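/*
 * Instantiation sketch (a minimal illustration; the size combinations shown
 * are illustrative and the including source file is not part of this
 * template): the template is expanded by defining OP_SIZE and ADDR_SIZE and
 * including this header once per combination, e.g.
 *
 *      #define OP_SIZE   8
 *      #define ADDR_SIZE 16
 *      #include "IEMAllCImplStrInstr.cpp.h"   // instantiates e.g. iemCImpl_repe_cmps_op8_addr16
 *
 *      #define OP_SIZE   32
 *      #define ADDR_SIZE 64
 *      #include "IEMAllCImplStrInstr.cpp.h"   // instantiates e.g. iemCImpl_repe_cmps_op32_addr64
 *
 * The #undef block at the end of this file clears the configuration macros
 * again so the next inclusion starts from a clean slate.
 */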
77/** @def IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
78 * Used in the outer (page-by-page) loop to check for reasons for returning
79 * before completing the instruction. In raw-mode we temporarily enable
80 * interrupts to let the host interrupt us. We cannot let big string operations
81 * hog the CPU, especially not in raw-mode.
82 */
83#define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \
84 do { \
85 if (RT_LIKELY( !VMCPU_FF_IS_ANY_SET(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
86 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
87 && !VM_FF_IS_ANY_SET(a_pVM, VM_FF_YIELD_REPSTR_MASK) \
88 )) \
89 { /* probable */ } \
90 else \
91 { \
92 LogFlow(("%s: Leaving early (outer)! ffcpu=%#RX64 ffvm=%#x\n", \
93 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
94 return VINF_SUCCESS; \
95 } \
96 } while (0)
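/* Note: returning VINF_SUCCESS from these checks is safe because the loops
 below write the updated rCX/rSI/rDI (and eflags where applicable) back to
 the guest context before the check runs, while RIP is only advanced once
 the whole instruction completes; the interrupted REP instruction therefore
 simply resumes at the current element when execution continues. */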
97
98/** @def IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
99 * This is used in some of the inner loops to make sure we respond immediately
100 * to VMCPU_FF_IOM as well as outside requests. Use this for expensive
101 * instructions. Use IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN for
102 * ones that are typically cheap. */
103#define IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
104 do { \
105 if (RT_LIKELY( ( !VMCPU_FF_IS_ANY_SET(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
106 && !VM_FF_IS_ANY_SET(a_pVM, VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK)) \
107 || (a_fExitExpr) )) \
108 { /* very likely */ } \
109 else \
110 { \
111 LogFlow(("%s: Leaving early (inner)! ffcpu=%#RX64 ffvm=%#x\n", \
112 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
113 return VINF_SUCCESS; \
114 } \
115 } while (0)
116
117
118/** @def IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
119 * This is used in the inner loops where
120 * IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN isn't used. It only
121 * checks the CPU FFs so that we respond immediately to the pending IOM FF
122 * (status code is hidden in IEMCPU::rcPassUp by IEM memory commit code).
123 */
124#define IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
125 do { \
126 if (RT_LIKELY( !VMCPU_FF_IS_ANY_SET(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
127 || (a_fExitExpr) )) \
128 { /* very likely */ } \
129 else \
130 { \
131 LogFlow(("%s: Leaving early (inner)! ffcpu=%#RX64 (ffvm=%#x)\n", \
132 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
133 return VINF_SUCCESS; \
134 } \
135 } while (0)
136
137
138/**
139 * Implements 'REPE CMPS'.
140 */
141IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
142{
143 PVM pVM = pVCpu->CTX_SUFF(pVM);
144
145 /*
146 * Setup.
147 */
148 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
149 if (uCounterReg == 0)
150 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
151
152 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
153
154 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
155 uint64_t uSrc1Base = 0; /* gcc may not be used uninitialized */
156 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
157 if (rcStrict != VINF_SUCCESS)
158 return rcStrict;
159
160 uint64_t uSrc2Base = 0; /* gcc may not be used uninitialized */
161 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base);
162 if (rcStrict != VINF_SUCCESS)
163 return rcStrict;
164
165 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
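 /* e.g. for OP_SIZE=32 with EFLAGS.DF set this yields cbIncr = -4, so rSI/rDI
 step downwards through memory; with DF clear it is +4. */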
166 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
167 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
168 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
169
170 /*
171 * The loop.
172 */
173 for (;;)
174 {
175 /*
176 * Do segmentation and virtual page stuff.
177 */
178 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
179 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
180 uint32_t cLeftSrc1Page = (GUEST_PAGE_SIZE - (uVirtSrc1Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
181 if (cLeftSrc1Page > uCounterReg)
182 cLeftSrc1Page = uCounterReg;
183 uint32_t cLeftSrc2Page = (GUEST_PAGE_SIZE - (uVirtSrc2Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
184 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
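 /* Worked example (illustrative values): with 4 KiB guest pages, OP_SIZE=16
 and uVirtSrc1Addr = 0xffe, cLeftSrc1Page = (0x1000 - 0xffe) / 2 = 1, i.e.
 only one word remains before the page boundary, so the block fast path
 below handles a single element and the next outer iteration starts on a
 fresh page. */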
185
186 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
187 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
188 && ( IS_64_BIT_CODE(pVCpu)
189 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
190 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
191 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit
192 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
193 )
194 )
195 {
196 RTGCPHYS GCPhysSrc1Mem;
197 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, OP_SIZE / 8, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
198 if (rcStrict != VINF_SUCCESS)
199 return rcStrict;
200
201 RTGCPHYS GCPhysSrc2Mem;
202 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, OP_SIZE / 8, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
203 if (rcStrict != VINF_SUCCESS)
204 return rcStrict;
205
206 /*
207 * If we can map the page without trouble, do a block processing
208 * until the end of the current page.
209 */
210 PGMPAGEMAPLOCK PgLockSrc2Mem;
211 OP_TYPE const *puSrc2Mem;
212 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
213 if (rcStrict == VINF_SUCCESS)
214 {
215 PGMPAGEMAPLOCK PgLockSrc1Mem;
216 OP_TYPE const *puSrc1Mem;
217 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
218 if (rcStrict == VINF_SUCCESS)
219 {
220 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
221 {
222 /* All matches, only compare the last item to get the right eflags. */
223 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
224 uSrc1AddrReg += cLeftPage * cbIncr;
225 uSrc2AddrReg += cLeftPage * cbIncr;
226 uCounterReg -= cLeftPage;
227 }
228 else
229 {
230 /* Some mismatch, compare each item (and keep volatile
231 memory in mind). */
232 uint32_t off = 0;
233 do
234 {
235 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
236 off++;
237 } while ( off < cLeftPage
238 && (uEFlags & X86_EFL_ZF));
239 uSrc1AddrReg += cbIncr * off;
240 uSrc2AddrReg += cbIncr * off;
241 uCounterReg -= off;
242 }
243
244 /* Update the registers before looping. */
245 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg;
246 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg;
247 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg;
248 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
249
250 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
251 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
252 if ( uCounterReg == 0
253 || !(uEFlags & X86_EFL_ZF))
254 break;
255 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
256 continue;
257 }
258 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
259 }
260 }
261
262 /*
263 * Fallback - slow processing till the end of the current page.
264 * In the cross-page boundary case we will end up here with cLeftPage
265 * as 0, in which case we execute a single iteration.
266 */
267 do
268 {
269 OP_TYPE uValue1;
270 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg);
271 if (rcStrict != VINF_SUCCESS)
272 return rcStrict;
273 OP_TYPE uValue2;
274 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
275 if (rcStrict != VINF_SUCCESS)
276 return rcStrict;
277 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
278
279 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr;
280 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr;
281 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
282 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
283 cLeftPage--;
284 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
285 } while ( (int32_t)cLeftPage > 0
286 && (uEFlags & X86_EFL_ZF));
287
288 /*
289 * Next page? Must check for interrupts and stuff here.
290 */
291 if ( uCounterReg == 0
292 || !(uEFlags & X86_EFL_ZF))
293 break;
294 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
295 }
296
297 /*
298 * Done.
299 */
300 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
301}
302
303
304/**
305 * Implements 'REPNE CMPS'.
306 */
307IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
308{
309 PVM pVM = pVCpu->CTX_SUFF(pVM);
310
311 /*
312 * Setup.
313 */
314 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
315 if (uCounterReg == 0)
316 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
317
318 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
319
320 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
321 uint64_t uSrc1Base = 0; /* gcc may not be used uninitialized */
322 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
323 if (rcStrict != VINF_SUCCESS)
324 return rcStrict;
325
326 uint64_t uSrc2Base = 0; /* gcc may not be used uninitialized */
327 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base);
328 if (rcStrict != VINF_SUCCESS)
329 return rcStrict;
330
331 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
332 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
333 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
334 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
335
336 /*
337 * The loop.
338 */
339 for (;;)
340 {
341 /*
342 * Do segmentation and virtual page stuff.
343 */
344 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
345 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
346 uint32_t cLeftSrc1Page = (GUEST_PAGE_SIZE - (uVirtSrc1Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
347 if (cLeftSrc1Page > uCounterReg)
348 cLeftSrc1Page = uCounterReg;
349 uint32_t cLeftSrc2Page = (GUEST_PAGE_SIZE - (uVirtSrc2Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
350 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
351
352 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
353 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
354 && ( IS_64_BIT_CODE(pVCpu)
355 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
356 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
357 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit
358 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
359 )
360 )
361 {
362 RTGCPHYS GCPhysSrc1Mem;
363 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, OP_SIZE / 8, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
364 if (rcStrict != VINF_SUCCESS)
365 return rcStrict;
366
367 RTGCPHYS GCPhysSrc2Mem;
368 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, OP_SIZE / 8, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
369 if (rcStrict != VINF_SUCCESS)
370 return rcStrict;
371
372 /*
373 * If we can map the page without trouble, do a block processing
374 * until the end of the current page.
375 */
376 OP_TYPE const *puSrc2Mem;
377 PGMPAGEMAPLOCK PgLockSrc2Mem;
378 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
379 if (rcStrict == VINF_SUCCESS)
380 {
381 OP_TYPE const *puSrc1Mem;
382 PGMPAGEMAPLOCK PgLockSrc1Mem;
383 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
384 if (rcStrict == VINF_SUCCESS)
385 {
386 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
387 {
388 /* All matches, only compare the last item to get the right eflags. */
389 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
390 uSrc1AddrReg += cLeftPage * cbIncr;
391 uSrc2AddrReg += cLeftPage * cbIncr;
392 uCounterReg -= cLeftPage;
393 }
394 else
395 {
396 /* Some mismatch, compare each item (and keep volatile
397 memory in mind). */
398 uint32_t off = 0;
399 do
400 {
401 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
402 off++;
403 } while ( off < cLeftPage
404 && !(uEFlags & X86_EFL_ZF));
405 uSrc1AddrReg += cbIncr * off;
406 uSrc2AddrReg += cbIncr * off;
407 uCounterReg -= off;
408 }
409
410 /* Update the registers before looping. */
411 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg;
412 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg;
413 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg;
414 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
415
416 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
417 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
418 if ( uCounterReg == 0
419 || (uEFlags & X86_EFL_ZF))
420 break;
421 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
422 continue;
423 }
424 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
425 }
426 }
427
428 /*
429 * Fallback - slow processing till the end of the current page.
430 * In the cross-page boundary case we will end up here with cLeftPage
431 * as 0, in which case we execute a single iteration.
432 */
433 do
434 {
435 OP_TYPE uValue1;
436 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg);
437 if (rcStrict != VINF_SUCCESS)
438 return rcStrict;
439 OP_TYPE uValue2;
440 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
441 if (rcStrict != VINF_SUCCESS)
442 return rcStrict;
443 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
444
445 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr;
446 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr;
447 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
448 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
449 cLeftPage--;
450 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
451 } while ( (int32_t)cLeftPage > 0
452 && !(uEFlags & X86_EFL_ZF));
453
454 /*
455 * Next page? Must check for interrupts and stuff here.
456 */
457 if ( uCounterReg == 0
458 || (uEFlags & X86_EFL_ZF))
459 break;
460 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
461 }
462
463 /*
464 * Done.
465 */
466 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
467}
468
469
470/**
471 * Implements 'REPE SCAS'.
472 */
473IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
474{
475 PVM pVM = pVCpu->CTX_SUFF(pVM);
476
477 /*
478 * Setup.
479 */
480 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
481 if (uCounterReg == 0)
482 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
483
484 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
485 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
486 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
487 if (rcStrict != VINF_SUCCESS)
488 return rcStrict;
489
490 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
491 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX;
492 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
493 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
494
495 /*
496 * The loop.
497 */
498 for (;;)
499 {
500 /*
501 * Do segmentation and virtual page stuff.
502 */
503 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
504 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
505 if (cLeftPage > uCounterReg)
506 cLeftPage = uCounterReg;
507 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
508 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
509 && ( IS_64_BIT_CODE(pVCpu)
510 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
511 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
512 )
513 )
514 {
515 RTGCPHYS GCPhysMem;
516 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, OP_SIZE / 8, IEM_ACCESS_DATA_R, &GCPhysMem);
517 if (rcStrict != VINF_SUCCESS)
518 return rcStrict;
519
520 /*
521 * If we can map the page without trouble, do a block processing
522 * until the end of the current page.
523 */
524 PGMPAGEMAPLOCK PgLockMem;
525 OP_TYPE const *puMem;
526 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
527 if (rcStrict == VINF_SUCCESS)
528 {
529 /* Search till we find a mismatching item. */
530 OP_TYPE uTmpValue;
531 bool fQuit;
532 uint32_t i = 0;
533 do
534 {
535 uTmpValue = puMem[i++];
536 fQuit = uTmpValue != uValueReg;
537 } while (i < cLeftPage && !fQuit);
538
539 /* Update the regs. */
540 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
541 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
542 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
543 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
544 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
545 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
546 if ( fQuit
547 || uCounterReg == 0)
548 break;
549
550 /* If unaligned, we drop thru and do the page crossing access
551 below. Otherwise, do the next page. */
552 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
553 {
554 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
555 continue;
556 }
557 cLeftPage = 0;
558 }
559 }
560
561 /*
562 * Fallback - slow processing till the end of the current page.
563 * In the cross-page boundary case we will end up here with cLeftPage
564 * as 0, in which case we execute a single iteration.
565 */
566 do
567 {
568 OP_TYPE uTmpValue;
569 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
570 if (rcStrict != VINF_SUCCESS)
571 return rcStrict;
572 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
573
574 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
575 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
576 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
577 cLeftPage--;
578 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
579 } while ( (int32_t)cLeftPage > 0
580 && (uEFlags & X86_EFL_ZF));
581
582 /*
583 * Next page? Must check for interrupts and stuff here.
584 */
585 if ( uCounterReg == 0
586 || !(uEFlags & X86_EFL_ZF))
587 break;
588 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
589 }
590
591 /*
592 * Done.
593 */
594 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
595}
596
597
598/**
599 * Implements 'REPNE SCAS'.
600 */
601IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
602{
603 PVM pVM = pVCpu->CTX_SUFF(pVM);
604
605 /*
606 * Setup.
607 */
608 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
609 if (uCounterReg == 0)
610 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
611
612 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
613 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
614 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
615 if (rcStrict != VINF_SUCCESS)
616 return rcStrict;
617
618 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
619 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX;
620 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
621 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
622
623 /*
624 * The loop.
625 */
626 for (;;)
627 {
628 /*
629 * Do segmentation and virtual page stuff.
630 */
631 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
632 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
633 if (cLeftPage > uCounterReg)
634 cLeftPage = uCounterReg;
635 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
636 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
637 && ( IS_64_BIT_CODE(pVCpu)
638 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
639 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
640 )
641 )
642 {
643 RTGCPHYS GCPhysMem;
644 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, OP_SIZE / 8, IEM_ACCESS_DATA_R, &GCPhysMem);
645 if (rcStrict != VINF_SUCCESS)
646 return rcStrict;
647
648 /*
649 * If we can map the page without trouble, do a block processing
650 * until the end of the current page.
651 */
652 PGMPAGEMAPLOCK PgLockMem;
653 OP_TYPE const *puMem;
654 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
655 if (rcStrict == VINF_SUCCESS)
656 {
657 /* Search till we find a mismatching item. */
658 OP_TYPE uTmpValue;
659 bool fQuit;
660 uint32_t i = 0;
661 do
662 {
663 uTmpValue = puMem[i++];
664 fQuit = uTmpValue == uValueReg;
665 } while (i < cLeftPage && !fQuit);
666
667 /* Update the regs. */
668 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
669 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
670 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
671 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
672 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
673 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
674 if ( fQuit
675 || uCounterReg == 0)
676 break;
677
678 /* If unaligned, we drop thru and do the page crossing access
679 below. Otherwise, do the next page. */
680 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
681 {
682 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
683 continue;
684 }
685 cLeftPage = 0;
686 }
687 }
688
689 /*
690 * Fallback - slow processing till the end of the current page.
691 * In the cross-page boundary case we will end up here with cLeftPage
692 * as 0, in which case we execute a single iteration.
693 */
694 do
695 {
696 OP_TYPE uTmpValue;
697 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
698 if (rcStrict != VINF_SUCCESS)
699 return rcStrict;
700 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
701 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
702 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
703 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
704 cLeftPage--;
705 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
706 } while ( (int32_t)cLeftPage > 0
707 && !(uEFlags & X86_EFL_ZF));
708
709 /*
710 * Next page? Must check for interrupts and stuff here.
711 */
712 if ( uCounterReg == 0
713 || (uEFlags & X86_EFL_ZF))
714 break;
715 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
716 }
717
718 /*
719 * Done.
720 */
721 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
722}
723
724
725
726
727/**
728 * Implements 'REP MOVS'.
729 */
730IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
731{
732 PVM pVM = pVCpu->CTX_SUFF(pVM);
733
734 /*
735 * Setup.
736 */
737 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
738 if (uCounterReg == 0)
739 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
740
741 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
742
743 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
744 uint64_t uSrcBase = 0; /* gcc may not be used uninitialized */
745 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uSrcBase);
746 if (rcStrict != VINF_SUCCESS)
747 return rcStrict;
748
749 uint64_t uDstBase = 0; /* gcc may not be used uninitialized */
750 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uDstBase);
751 if (rcStrict != VINF_SUCCESS)
752 return rcStrict;
753
754 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
755 ADDR_TYPE uSrcAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
756 ADDR_TYPE uDstAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
757
758 /*
759 * Be careful with handle bypassing.
760 */
761 if (pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS)
762 {
763 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
764 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
765 }
766
767 /*
768 * The loop.
769 */
770 for (;;)
771 {
772 /*
773 * Do segmentation and virtual page stuff.
774 */
775 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
776 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
777 uint32_t cLeftSrcPage = (GUEST_PAGE_SIZE - (uVirtSrcAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
778 if (cLeftSrcPage > uCounterReg)
779 cLeftSrcPage = uCounterReg;
780 uint32_t cLeftDstPage = (GUEST_PAGE_SIZE - (uVirtDstAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
781 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
782
783 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
784 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
785 && ( IS_64_BIT_CODE(pVCpu)
786 || ( uSrcAddrReg < pSrcHid->u32Limit
787 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
788 && uDstAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
789 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
790 )
791 )
792 {
793 RTGCPHYS GCPhysSrcMem;
794 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrcAddr, OP_SIZE / 8, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
795 if (rcStrict != VINF_SUCCESS)
796 return rcStrict;
797
798 RTGCPHYS GCPhysDstMem;
799 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtDstAddr, OP_SIZE / 8, IEM_ACCESS_DATA_W, &GCPhysDstMem);
800 if (rcStrict != VINF_SUCCESS)
801 return rcStrict;
802
803 /*
804 * If we can map the page without trouble, do a block processing
805 * until the end of the current page.
806 */
807 PGMPAGEMAPLOCK PgLockDstMem;
808 OP_TYPE *puDstMem;
809 rcStrict = iemMemPageMap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
810 if (rcStrict == VINF_SUCCESS)
811 {
812 PGMPAGEMAPLOCK PgLockSrcMem;
813 OP_TYPE const *puSrcMem;
814 rcStrict = iemMemPageMap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
815 if (rcStrict == VINF_SUCCESS)
816 {
817 Assert( (GCPhysSrcMem >> GUEST_PAGE_SHIFT) != (GCPhysDstMem >> GUEST_PAGE_SHIFT)
818 || ((uintptr_t)puSrcMem >> GUEST_PAGE_SHIFT) == ((uintptr_t)puDstMem >> GUEST_PAGE_SHIFT));
819
820 /* Perform the operation exactly (don't use memcpy to avoid
821 having to consider how its implementation would affect
822 any overlapping source and destination area). */
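 /* Overlap sketch (illustrative values): a byte REP MOVS with DF=0, rSI=0x1000
 and rDI=0x1001 must propagate the byte at 0x1000 forward one element per
 iteration (0x1000 -> 0x1001 -> 0x1002, ...); a memcpy implementation that
 copies backwards or in wide chunks could break that pattern, hence the
 explicit element-by-element loop below. */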
823 OP_TYPE const *puSrcCur = puSrcMem;
824 OP_TYPE *puDstCur = puDstMem;
825 uint32_t cTodo = cLeftPage;
826 while (cTodo-- > 0)
827 *puDstCur++ = *puSrcCur++;
828
829 /* Update the registers. */
830 pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
831 pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
832 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
833
834 iemMemPageUnmap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
835 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
836
837 if (uCounterReg == 0)
838 break;
839 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
840 continue;
841 }
842 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
843 }
844 }
845
846 /*
847 * Fallback - slow processing till the end of the current page.
848 * In the cross-page boundary case we will end up here with cLeftPage
849 * as 0, in which case we execute a single iteration.
850 */
851 do
852 {
853 OP_TYPE uValue;
854 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uSrcAddrReg);
855 if (rcStrict != VINF_SUCCESS)
856 return rcStrict;
857 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uDstAddrReg, uValue);
858 if (rcStrict != VINF_SUCCESS)
859 return rcStrict;
860
861 pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cbIncr;
862 pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cbIncr;
863 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
864 cLeftPage--;
865 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
866 } while ((int32_t)cLeftPage > 0);
867
868 /*
869 * Next page. Must check for interrupts and stuff here.
870 */
871 if (uCounterReg == 0)
872 break;
873 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
874 }
875
876 /*
877 * Done.
878 */
879 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
880}
881
882
883/**
884 * Implements 'REP STOS'.
885 */
886IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
887{
888 PVM pVM = pVCpu->CTX_SUFF(pVM);
889
890 /*
891 * Setup.
892 */
893 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
894 if (uCounterReg == 0)
895 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
896
897 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
898
899 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
900 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
901 if (rcStrict != VINF_SUCCESS)
902 return rcStrict;
903
904 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
905 OP_TYPE const uValue = pVCpu->cpum.GstCtx.OP_rAX;
906 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
907
908 /*
909 * Be careful with handle bypassing.
910 */
911 /** @todo Permit doing a page if correctly aligned. */
912 if (pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS)
913 {
914 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
915 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
916 }
917
918 /*
919 * The loop.
920 */
921 for (;;)
922 {
923 /*
924 * Do segmentation and virtual page stuff.
925 */
926 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
927 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
928 if (cLeftPage > uCounterReg)
929 cLeftPage = uCounterReg;
930 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
931 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
932 && ( IS_64_BIT_CODE(pVCpu)
933 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
934 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
935 )
936 )
937 {
938 RTGCPHYS GCPhysMem;
939 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, OP_SIZE / 8, IEM_ACCESS_DATA_W, &GCPhysMem);
940 if (rcStrict != VINF_SUCCESS)
941 return rcStrict;
942
943 /*
944 * If we can map the page without trouble, do a block processing
945 * until the end of the current page.
946 */
947 PGMPAGEMAPLOCK PgLockMem;
948 OP_TYPE *puMem;
949 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
950 if (rcStrict == VINF_SUCCESS)
951 {
952 /* Update the regs first so we can loop on cLeftPage. */
953 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
954 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
955
956 /* Do the memsetting. */
957#if OP_SIZE == 8
958 memset(puMem, uValue, cLeftPage);
959/*#elif OP_SIZE == 32
960 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
961#else
962 while (cLeftPage-- > 0)
963 *puMem++ = uValue;
964#endif
965
966 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
967
968 if (uCounterReg == 0)
969 break;
970
971 /* If unaligned, we drop thru and do the page crossing access
972 below. Otherwise, do the next page. */
973 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
974 {
975 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
976 continue;
977 }
978 cLeftPage = 0;
979 }
980 /* If we got an invalid physical address in the page table, just skip
981 ahead to the next page or until the counter reaches zero. This crazy
982 optimization is for a buggy EFI firmware that's driving me nuts. */
983 else if (rcStrict == VERR_PGM_PHYS_TLB_UNASSIGNED)
984 {
985 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
986 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
987 if (uCounterReg == 0)
988 break;
989 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
990 {
991 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
992 continue;
993 }
994 }
995 }
996
997 /*
998 * Fallback - slow processing till the end of the current page.
999 * In the cross-page boundary case we will end up here with cLeftPage
1000 * as 0, in which case we execute a single iteration.
1001 */
1002 do
1003 {
1004 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uAddrReg, uValue);
1005 if (rcStrict != VINF_SUCCESS)
1006 return rcStrict;
1007 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
1008 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1009 cLeftPage--;
1010 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1011 } while ((int32_t)cLeftPage > 0);
1012
1013 /*
1014 * Next page. Must check for interrupts and stuff here.
1015 */
1016 if (uCounterReg == 0)
1017 break;
1018 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1019 }
1020
1021 /*
1022 * Done.
1023 */
1024 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1025}
1026
1027
1028/**
1029 * Implements 'REP LODS'.
1030 */
1031IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
1032{
1033 PVM pVM = pVCpu->CTX_SUFF(pVM);
1034
1035 /*
1036 * Setup.
1037 */
1038 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1039 if (uCounterReg == 0)
1040 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1041
1042 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg));
1043 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
1044 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
1045 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uBaseAddr);
1046 if (rcStrict != VINF_SUCCESS)
1047 return rcStrict;
1048
1049 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1050 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
1051
1052 /*
1053 * The loop.
1054 */
1055 for (;;)
1056 {
1057 /*
1058 * Do segmentation and virtual page stuff.
1059 */
1060 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1061 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1062 if (cLeftPage > uCounterReg)
1063 cLeftPage = uCounterReg;
1064 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1065 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1066 && ( IS_64_BIT_CODE(pVCpu)
1067 || ( uAddrReg < pSrcHid->u32Limit
1068 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
1069 )
1070 )
1071 {
1072 RTGCPHYS GCPhysMem;
1073 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, OP_SIZE / 8, IEM_ACCESS_DATA_R, &GCPhysMem);
1074 if (rcStrict != VINF_SUCCESS)
1075 return rcStrict;
1076
1077 /*
1078 * If we can map the page without trouble, we can get away with
1079 * just reading the last value on the page.
1080 */
1081 PGMPAGEMAPLOCK PgLockMem;
1082 OP_TYPE const *puMem;
1083 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1084 if (rcStrict == VINF_SUCCESS)
1085 {
1086 /* Only get the last byte, the rest doesn't matter in direct access mode. */
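/* (REP LODS has no architectural effect besides the final rAX value and the
 register/counter updates, so when the whole page is plain RAM only the last
 element actually needs to be read.) */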
1087#if OP_SIZE == 32
1088 pVCpu->cpum.GstCtx.rax = puMem[cLeftPage - 1];
1089#else
1090 pVCpu->cpum.GstCtx.OP_rAX = puMem[cLeftPage - 1];
1091#endif
1092 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
1093 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
1094 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1095
1096 if (uCounterReg == 0)
1097 break;
1098
1099 /* If unaligned, we drop thru and do the page crossing access
1100 below. Otherwise, do the next page. */
1101 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1102 {
1103 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1104 continue;
1105 }
1106 cLeftPage = 0;
1107 }
1108 }
1109
1110 /*
1111 * Fallback - slow processing till the end of the current page.
1112 * In the cross-page boundary case we will end up here with cLeftPage
1113 * as 0, in which case we execute a single iteration.
1114 */
1115 do
1116 {
1117 OP_TYPE uTmpValue;
1118 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, iEffSeg, uAddrReg);
1119 if (rcStrict != VINF_SUCCESS)
1120 return rcStrict;
1121#if OP_SIZE == 32
1122 pVCpu->cpum.GstCtx.rax = uTmpValue;
1123#else
1124 pVCpu->cpum.GstCtx.OP_rAX = uTmpValue;
1125#endif
1126 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
1127 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1128 cLeftPage--;
1129 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1130 } while ((int32_t)cLeftPage > 0);
1131
1132 if (rcStrict != VINF_SUCCESS)
1133 break;
1134
1135 /*
1136 * Next page. Must check for interrupts and stuff here.
1137 */
1138 if (uCounterReg == 0)
1139 break;
1140 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1141 }
1142
1143 /*
1144 * Done.
1145 */
1146 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1147}
1148
1149
1150#if OP_SIZE != 64
1151
1152/**
1153 * Implements 'INS' (no rep)
1154 */
1155IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1156{
1157 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1158 VBOXSTRICTRC rcStrict;
1159
1160 /*
1161 * Be careful with handle bypassing.
1162 */
1163 if (pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS)
1164 {
1165 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1166 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1167 }
1168
1169 /*
1170 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1171 * segmentation and finally any #PF due to virtual address translation.
1172 * ASSUMES nothing is read from the I/O port before traps are taken.
1173 */
1174 if (!fIoChecked)
1175 {
1176 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
1177 if (rcStrict != VINF_SUCCESS)
1178 return rcStrict;
1179 }
1180
1181 /*
1182 * Check nested-guest I/O intercepts.
1183 */
1184#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1185 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1186 {
1187 VMXEXITINSTRINFO ExitInstrInfo;
1188 ExitInstrInfo.u = 0;
1189 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1190 ExitInstrInfo.StrIo.iSegReg = X86_SREG_ES;
1191 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_INS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, false /* fRep */,
1192 ExitInstrInfo, cbInstr);
1193 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1194 return rcStrict;
1195 }
1196#endif
1197
1198#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1199 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1200 {
1201 rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES,
1202 false /* fRep */, true /* fStrIo */, cbInstr);
1203 if (rcStrict == VINF_SVM_VMEXIT)
1204 return VINF_SUCCESS;
1205 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1206 {
1207 Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx,
1208 OP_SIZE / 8, VBOXSTRICTRC_VAL(rcStrict)));
1209 return rcStrict;
1210 }
1211 }
1212#endif
1213
1214 OP_TYPE *puMem;
1215 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI,
1216 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
1217 if (rcStrict != VINF_SUCCESS)
1218 return rcStrict;
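 /* Mapping the ES:rDI destination before touching the port means any #PF or
 #GP is raised before the port read has had any side effect, matching the
 ASSUMES note above. */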
1219
1220 uint32_t u32Value = 0;
1221 rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, &u32Value, OP_SIZE / 8);
1222 if (IOM_SUCCESS(rcStrict))
1223 {
1224 /**
1225 * @todo I/O breakpoint support for INS
1226 */
1227 *puMem = (OP_TYPE)u32Value;
1228# ifdef IN_RING3
1229 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
1230# else
1231 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
1232# endif
1233 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1234 {
1235 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
1236 pVCpu->cpum.GstCtx.ADDR_rDI += OP_SIZE / 8;
1237 else
1238 pVCpu->cpum.GstCtx.ADDR_rDI -= OP_SIZE / 8;
1239
1240 /** @todo finish: work out how this should work wrt status codes. Not sure we
1241 * can use iemSetPassUpStatus here, but it depends on what
1242 * iemRegAddToRipAndFinishingClearingRF may eventually return (if anything)... */
1243 rcStrict2 = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1244 if (rcStrict2 != VINF_SUCCESS)
1245 {
1246 iemSetPassUpStatus(pVCpu, rcStrict);
1247 rcStrict = rcStrict2;
1248 }
1249 pVCpu->iem.s.cPotentialExits++;
1250 }
1251 else
1252 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), RT_FAILURE_NP(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1253 }
1254 return rcStrict;
1255}
1256
1257
1258/**
1259 * Implements 'REP INS'.
1260 */
1261IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1262{
1263 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1264
1265 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_TR);
1266
1267 /*
1268 * Setup.
1269 */
1270 uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
1271 VBOXSTRICTRC rcStrict;
1272 if (!fIoChecked)
1273 {
1274/** @todo check if this is too early for ecx=0. */
1275 rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
1276 if (rcStrict != VINF_SUCCESS)
1277 return rcStrict;
1278 }
1279
1280 /*
1281 * Check nested-guest I/O intercepts.
1282 */
1283#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1284 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1285 {
1286 VMXEXITINSTRINFO ExitInstrInfo;
1287 ExitInstrInfo.u = 0;
1288 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1289 ExitInstrInfo.StrIo.iSegReg = X86_SREG_ES;
1290 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_INS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, true /* fRep */,
1291 ExitInstrInfo, cbInstr);
1292 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1293 return rcStrict;
1294 }
1295#endif
1296
1297#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1298 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1299 {
1300 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, true /* fRep */,
1301 true /* fStrIo */, cbInstr);
1302 if (rcStrict == VINF_SVM_VMEXIT)
1303 return VINF_SUCCESS;
1304 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1305 {
1306 Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
1307 VBOXSTRICTRC_VAL(rcStrict)));
1308 return rcStrict;
1309 }
1310 }
1311#endif
1312
1313 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1314 if (uCounterReg == 0)
1315 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1316
1317 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
1318 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
1319 if (rcStrict != VINF_SUCCESS)
1320 return rcStrict;
1321
1322 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1323 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
1324
1325 /*
1326 * Be careful with handle bypassing.
1327 */
1328 if (pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS)
1329 {
1330 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1331 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1332 }
1333
1334 /*
1335 * The loop.
1336 */
1337 for (;;)
1338 {
1339 /*
1340 * Do segmentation and virtual page stuff.
1341 */
1342 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1343 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1344 if (cLeftPage > uCounterReg)
1345 cLeftPage = uCounterReg;
1346 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1347 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1348 && ( IS_64_BIT_CODE(pVCpu)
1349 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
1350 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
1351 )
1352 )
1353 {
1354 RTGCPHYS GCPhysMem;
1355 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, OP_SIZE / 8, IEM_ACCESS_DATA_W, &GCPhysMem);
1356 if (rcStrict != VINF_SUCCESS)
1357 return rcStrict;
1358
1359 /*
1360 * If we can map the page without trouble, use the IOM
1361 * string I/O interface to do the work.
1362 */
1363 PGMPAGEMAPLOCK PgLockMem;
1364 OP_TYPE *puMem;
1365 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1366 if (rcStrict == VINF_SUCCESS)
1367 {
1368 uint32_t cTransfers = cLeftPage;
1369 rcStrict = IOMIOPortReadString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1370
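 /* IOMIOPortReadString appears to update cTransfers to the number of elements
 still outstanding (an assumption consistent with the arithmetic and the
 assertion below), so cLeftPage - cTransfers is what actually got read. */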
1371 uint32_t cActualTransfers = cLeftPage - cTransfers;
1372 Assert(cActualTransfers <= cLeftPage);
1373 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
1374 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
1375 puMem += cActualTransfers;
1376
1377 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1378
1379 if (rcStrict != VINF_SUCCESS)
1380 {
1381 if (IOM_SUCCESS(rcStrict))
1382 {
1383 /** @todo finish: work out how this should work wrt status codes. Not sure we
1384 * can use iemSetPassUpStatus here, but it depends on what
1385 * iemRegAddToRipAndFinishingClearingRF may eventually return (if anything)... */
1386 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1387 if (uCounterReg == 0)
1388 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1389 pVCpu->iem.s.cPotentialExits++;
1390 }
1391 return rcStrict;
1392 }
1393
1394 /* If unaligned, we drop thru and do the page crossing access
1395 below. Otherwise, do the next page. */
1396 if (uCounterReg == 0)
1397 break;
1398 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1399 {
1400 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1401 continue;
1402 }
1403 cLeftPage = 0;
1404 }
1405 }
1406
1407 /*
1408 * Fallback - slow processing till the end of the current page.
1409 * In the cross-page boundary case we will end up here with cLeftPage
1410 * as 0, in which case we execute a single iteration.
1411 *
1412 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1413 * I/O port, otherwise it wouldn't really be restartable.
1414 */
1415 /** @todo investigate what the CPU actually does with \#PF/\#GP
1416 * during INS. */
1417 do
1418 {
1419 OP_TYPE *puMem;
1420 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg,
1421 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
1422 if (rcStrict != VINF_SUCCESS)
1423 return rcStrict;
1424
1425 uint32_t u32Value = 0;
1426 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1427 if (!IOM_SUCCESS(rcStrict))
1428 {
1429 iemMemRollback(pVCpu);
1430 return rcStrict;
1431 }
1432
1433 *puMem = (OP_TYPE)u32Value;
1434# ifdef IN_RING3
1435 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
1436# else
1437 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
1438# endif
1439 if (rcStrict2 == VINF_SUCCESS)
1440 { /* likely */ }
1441 else
1442 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1443 RT_FAILURE(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1444
1445 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
1446 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1447
1448 cLeftPage--;
1449 if (rcStrict != VINF_SUCCESS)
1450 {
1451 /** @todo finish: work out how this should work wrt status codes. Not sure we
1452 * can use iemSetPassUpStatus here, but it depends on what
1453 * iemRegAddToRipAndFinishingClearingRF may eventually return (if anything)... */
1454 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1455 if (uCounterReg == 0)
1456 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1457 pVCpu->iem.s.cPotentialExits++;
1458 return rcStrict;
1459 }
1460
1461 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1462 } while ((int32_t)cLeftPage > 0);
1463
1464
1465 /*
1466 * Next page. Must check for interrupts and stuff here.
1467 */
1468 if (uCounterReg == 0)
1469 break;
1470 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1471 }
1472
1473 /*
1474 * Done.
1475 */
1476 pVCpu->iem.s.cPotentialExits++;
1477 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1478}
1479
1480
1481/**
1482 * Implements 'OUTS' (no rep)
1483 */
1484IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1485{
1486 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1487 VBOXSTRICTRC rcStrict;
1488
1489 /*
1490 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1491 * segmentation and finally any #PF due to virtual address translation.
1492 * ASSUMES nothing is read from the I/O port before traps are taken.
1493 */
1494 if (!fIoChecked)
1495 {
1496 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
1497 if (rcStrict != VINF_SUCCESS)
1498 return rcStrict;
1499 }
1500
1501 /*
1502 * Check nested-guest I/O intercepts.
1503 */
1504#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1505 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1506 {
1507 VMXEXITINSTRINFO ExitInstrInfo;
1508 ExitInstrInfo.u = 0;
1509 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1510 ExitInstrInfo.StrIo.iSegReg = iEffSeg;
1511 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_OUTS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, false /* fRep */,
1512 ExitInstrInfo, cbInstr);
1513 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1514 return rcStrict;
1515 }
1516#endif
1517
1518#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1519 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1520 {
1521 rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg,
1522 false /* fRep */, true /* fStrIo */, cbInstr);
1523 if (rcStrict == VINF_SVM_VMEXIT)
1524 return VINF_SUCCESS;
1525 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1526 {
1527 Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx,
1528 OP_SIZE / 8, VBOXSTRICTRC_VAL(rcStrict)));
1529 return rcStrict;
1530 }
1531 }
1532#endif
1533
1534 OP_TYPE uValue;
1535 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pVCpu->cpum.GstCtx.ADDR_rSI);
1536 if (rcStrict == VINF_SUCCESS)
1537 {
1538 rcStrict = IOMIOPortWrite(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, uValue, OP_SIZE / 8);
1539 if (IOM_SUCCESS(rcStrict))
1540 {
1541 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
1542 pVCpu->cpum.GstCtx.ADDR_rSI += OP_SIZE / 8;
1543 else
1544 pVCpu->cpum.GstCtx.ADDR_rSI -= OP_SIZE / 8;
1545 /** @todo finish: work out how this should work wrt status codes. Not sure we
1546 * can use iemSetPassUpStatus here, but it depends on what
1547 * iemRegAddToRipAndFinishingClearingRF may eventually return (if anything)... */
1548 if (rcStrict != VINF_SUCCESS)
1549 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1550 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1551 pVCpu->iem.s.cPotentialExits++;
1552 }
1553 }
1554 return rcStrict;
1555}
1556
1557
1558/**
1559 * Implements 'REP OUTS'.
1560 */
1561IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1562{
1563 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1564
1565 /*
1566 * Setup.
1567 */
1568 uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
1569 VBOXSTRICTRC rcStrict;
1570 if (!fIoChecked)
1571 {
1572/** @todo check if this is too early for ecx=0. */
1573 rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
1574 if (rcStrict != VINF_SUCCESS)
1575 return rcStrict;
1576 }
1577
1578 /*
1579 * Check nested-guest I/O intercepts.
1580 */
1581#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1582 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1583 {
1584 VMXEXITINSTRINFO ExitInstrInfo;
1585 ExitInstrInfo.u = 0;
1586 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1587 ExitInstrInfo.StrIo.iSegReg = iEffSeg;
1588 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_OUTS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, true /* fRep */,
1589 ExitInstrInfo, cbInstr);
1590 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1591 return rcStrict;
1592 }
1593#endif
1594
1595#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1596 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1597 {
1598 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, true /* fRep */,
1599 true /* fStrIo */, cbInstr);
1600 if (rcStrict == VINF_SVM_VMEXIT)
1601 return VINF_SUCCESS;
1602 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1603 {
1604 Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
1605 VBOXSTRICTRC_VAL(rcStrict)));
1606 return rcStrict;
1607 }
1608 }
1609#endif
1610
1611 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1612 if (uCounterReg == 0)
1613 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1614
1615 PCCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iEffSeg);
1616 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
1617 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pHid, iEffSeg, &uBaseAddr);
1618 if (rcStrict != VINF_SUCCESS)
1619 return rcStrict;
1620
1621 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1622 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
1623
1624 /*
1625 * The loop.
1626 */
1627 for (;;)
1628 {
1629 /*
1630 * Do segmentation and virtual page stuff.
1631 */
1632 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1633 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1634 if (cLeftPage > uCounterReg)
1635 cLeftPage = uCounterReg;
1636 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1637 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1638 && ( IS_64_BIT_CODE(pVCpu)
1639 || ( uAddrReg < pHid->u32Limit
1640 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1641 )
1642 )
1643 {
1644 RTGCPHYS GCPhysMem;
1645 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, OP_SIZE / 8, IEM_ACCESS_DATA_R, &GCPhysMem);
1646 if (rcStrict != VINF_SUCCESS)
1647 return rcStrict;
1648
1649 /*
1650 * If we can map the page without trouble, we use the IOM
1651 * string I/O interface to do the job.
1652 */
1653 PGMPAGEMAPLOCK PgLockMem;
1654 OP_TYPE const *puMem;
1655 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1656 if (rcStrict == VINF_SUCCESS)
1657 {
1658 uint32_t cTransfers = cLeftPage;
1659 rcStrict = IOMIOPortWriteString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1660
1661 uint32_t cActualTransfers = cLeftPage - cTransfers;
1662 Assert(cActualTransfers <= cLeftPage);
1663 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
1664 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
1665 puMem += cActualTransfers;
1666
1667 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1668
1669 if (rcStrict != VINF_SUCCESS)
1670 {
1671 if (IOM_SUCCESS(rcStrict))
1672 {
1673 /** @todo finish: work out how this should work wrt status codes. Not sure we
1674 * can use iemSetPassUpStatus here, but it depends on what
1675 * iemRegAddToRipAndFinishingClearingRF may eventually return (if anything)... */
1676 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1677 if (uCounterReg == 0)
1678 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1679 pVCpu->iem.s.cPotentialExits++;
1680 }
1681 return rcStrict;
1682 }
1683
1684 if (uCounterReg == 0)
1685 break;
1686
1687 /* If unaligned, we drop thru and do the page crossing access
1688 below. Otherwise, do the next page. */
1689 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1690 {
1691 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1692 continue;
1693 }
1694 cLeftPage = 0;
1695 }
1696 }
1697
1698 /*
1699 * Fallback - slow processing till the end of the current page.
1700 * In the cross-page boundary case we will end up here with cLeftPage
1701 * as 0, in which case we execute a single iteration.
1702 *
1703 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1704 * I/O port, otherwise it wouldn't really be restartable.
1705 */
1706 /** @todo investigate what the CPU actually does with \#PF/\#GP
1707 * during OUTS. */
1708 do
1709 {
1710 OP_TYPE uValue;
1711 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uAddrReg);
1712 if (rcStrict != VINF_SUCCESS)
1713 return rcStrict;
1714
1715 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1716 if (IOM_SUCCESS(rcStrict))
1717 {
1718 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
1719 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1720 cLeftPage--;
1721 }
1722 if (rcStrict != VINF_SUCCESS)
1723 {
1724 if (IOM_SUCCESS(rcStrict))
1725 {
1726 /** @todo finish: work out how this should work wrt status codes. Not sure we
1727 * can use iemSetPassUpStatus here, but it depends on what
1728 * iemRegAddToRipAndFinishingClearingRF may eventually return (if anything)... */
1729 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1730 if (uCounterReg == 0)
1731 iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1732 pVCpu->iem.s.cPotentialExits++;
1733 }
1734 return rcStrict;
1735 }
1736 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1737 } while ((int32_t)cLeftPage > 0);
1738
1739
1740 /*
1741 * Next page. Must check for interrupts and stuff here.
1742 */
1743 if (uCounterReg == 0)
1744 break;
1745 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1746 }
1747
1748 /*
1749 * Done.
1750 */
1751 pVCpu->iem.s.cPotentialExits++;
1752 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
1753}
1754
1755#endif /* OP_SIZE != 64-bit */
1756
1757
1758#undef OP_rAX
1759#undef OP_SIZE
1760#undef ADDR_SIZE
1761#undef ADDR_rDI
1762#undef ADDR_rSI
1763#undef ADDR_rCX
1764#undef ADDR_rIP
1765#undef ADDR2_TYPE
1766#undef ADDR_TYPE
1767#undef ADDR2_TYPE
1768#undef ADDR_VMXSTRIO
1769#undef IS_64_BIT_CODE
1770#undef IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
1771#undef IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1772#undef IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1773