VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h @ 96014

Last change on this file since 96014 was 95410, checked in by vboxsync on 2022-06-28:

VMM/IEM: Alignment checks (#AC(0)/#GP(0)). bugref:9898
1/* $Id: IEMAllCImplStrInstr.cpp.h 95410 2022-06-28 18:33:26Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40# define ADDR_VMXSTRIO 0
41#elif ADDR_SIZE == 32
42# define ADDR_rDI edi
43# define ADDR_rSI esi
44# define ADDR_rCX ecx
45# define ADDR2_TYPE uint32_t
46# define ADDR_VMXSTRIO 1
47#elif ADDR_SIZE == 64
48# define ADDR_rDI rdi
49# define ADDR_rSI rsi
50# define ADDR_rCX rcx
51# define ADDR2_TYPE uint64_t
52# define ADDR_VMXSTRIO 2
53# define IS_64_BIT_CODE(a_pVCpu) (true)
54#else
55# error "Bad ADDR_SIZE."
56#endif
57#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
58
59#if ADDR_SIZE == 64 || OP_SIZE == 64
60# define IS_64_BIT_CODE(a_pVCpu) (true)
61#elif ADDR_SIZE == 32
62# define IS_64_BIT_CODE(a_pVCpu) ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT)
63#else
64# define IS_64_BIT_CODE(a_pVCpu) (false)
65#endif
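/* Usage sketch (illustrative only; the real include sites live elsewhere in
 * the IEM sources and may differ): the includer defines the template
 * parameters and then pulls this file in, e.g.
 *
 *     #define OP_SIZE   16
 *     #define ADDR_SIZE 32
 *     #include "IEMAllCImplStrInstr.cpp.h"
 *
 * With those values OP_TYPE expands to uint16_t, OP_rAX to ax, ADDR_TYPE to
 * uint32_t, and the generated functions get names such as
 * iemCImpl_repe_cmps_op16_addr32.  The #undef block at the end of the file
 * resets the parameters for the next instantiation.
 */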
66
67/** @def IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
 68 * Used in the outer (page-by-page) loop to check for reasons for returning
69 * before completing the instruction. In raw-mode we temporarily enable
70 * interrupts to let the host interrupt us. We cannot let big string operations
71 * hog the CPU, especially not in raw-mode.
72 */
73#define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \
74 do { \
75 if (RT_LIKELY( !VMCPU_FF_IS_ANY_SET(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
76 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
77 && !VM_FF_IS_ANY_SET(a_pVM, VM_FF_YIELD_REPSTR_MASK) \
78 )) \
79 { /* probable */ } \
80 else \
81 { \
82 LogFlow(("%s: Leaving early (outer)! ffcpu=%#RX64 ffvm=%#x\n", \
83 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
84 return VINF_SUCCESS; \
85 } \
86 } while (0)
87
88/** @def IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
89 * This is used in some of the inner loops to make sure we respond immediately
90 * to VMCPU_FF_IOM as well as outside requests. Use this for expensive
91 * instructions. Use IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN for
92 * ones that are typically cheap. */
93#define IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
94 do { \
95 if (RT_LIKELY( ( !VMCPU_FF_IS_ANY_SET(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
96 && !VM_FF_IS_ANY_SET(a_pVM, VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK)) \
97 || (a_fExitExpr) )) \
98 { /* very likely */ } \
99 else \
100 { \
101 LogFlow(("%s: Leaving early (inner)! ffcpu=%#RX64 ffvm=%#x\n", \
102 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
103 return VINF_SUCCESS; \
104 } \
105 } while (0)
106
107
108/** @def IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
109 * This is used in the inner loops where
110 * IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN isn't used. It only
111 * checks the CPU FFs so that we respond immediately to the pending IOM FF
112 * (status code is hidden in IEMCPU::rcPassUp by IEM memory commit code).
113 */
114#define IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fExitExpr) \
115 do { \
116 if (RT_LIKELY( !VMCPU_FF_IS_ANY_SET(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
117 || (a_fExitExpr) )) \
118 { /* very likely */ } \
119 else \
120 { \
121 LogFlow(("%s: Leaving early (inner)! ffcpu=%#RX64 (ffvm=%#x)\n", \
122 __FUNCTION__, (uint64_t)(a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
123 return VINF_SUCCESS; \
124 } \
125 } while (0)
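/* In short: the _YIELD_ variant guards the outer page-by-page loops, the
 * _HIGH_PRIORITY_POST_ variant guards the inner loops of expensive
 * instructions (checking both VMCPU and VM forced-action flags), and the
 * _CPU_HIGH_PRIORITY_POST_ variant is the cheaper per-iteration check that
 * only looks at the VMCPU flags.  All three return VINF_SUCCESS early so the
 * partially completed, restartable string instruction can be resumed later. */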
126
127
128/**
129 * Implements 'REPE CMPS'.
130 */
131IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
132{
133 PVM pVM = pVCpu->CTX_SUFF(pVM);
134
135 /*
136 * Setup.
137 */
138 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
139 if (uCounterReg == 0)
140 {
141 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
142 return VINF_SUCCESS;
143 }
144
145 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
146
147 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
148 uint64_t uSrc1Base = 0; /* gcc may not be used uninitialized */
149 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
150 if (rcStrict != VINF_SUCCESS)
151 return rcStrict;
152
153 uint64_t uSrc2Base = 0; /* gcc may not be used uninitialized */
154 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base);
155 if (rcStrict != VINF_SUCCESS)
156 return rcStrict;
157
158 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
159 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
160 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
161 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
162
163 /*
164 * The loop.
165 */
166 for (;;)
167 {
168 /*
169 * Do segmentation and virtual page stuff.
170 */
171 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
172 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
173 uint32_t cLeftSrc1Page = (GUEST_PAGE_SIZE - (uVirtSrc1Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
174 if (cLeftSrc1Page > uCounterReg)
175 cLeftSrc1Page = uCounterReg;
176 uint32_t cLeftSrc2Page = (GUEST_PAGE_SIZE - (uVirtSrc2Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
177 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
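        /* cLeftPage = number of whole elements we can process before either
           operand crosses its guest page boundary, clipped to the remaining
           rCX count above.  The block path below is only taken when this chunk
           also fits within the segment limits (or we are in 64-bit code). */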
178
179 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
180 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
181 && ( IS_64_BIT_CODE(pVCpu)
182 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
183 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
184 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit
185 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
186 )
187 )
188 {
189 RTGCPHYS GCPhysSrc1Mem;
190 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
191 if (rcStrict != VINF_SUCCESS)
192 return rcStrict;
193
194 RTGCPHYS GCPhysSrc2Mem;
195 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
196 if (rcStrict != VINF_SUCCESS)
197 return rcStrict;
198
199 /*
200 * If we can map the page without trouble, do a block processing
201 * until the end of the current page.
202 */
203 PGMPAGEMAPLOCK PgLockSrc2Mem;
204 OP_TYPE const *puSrc2Mem;
205 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
206 if (rcStrict == VINF_SUCCESS)
207 {
208 PGMPAGEMAPLOCK PgLockSrc1Mem;
209 OP_TYPE const *puSrc1Mem;
210 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
211 if (rcStrict == VINF_SUCCESS)
212 {
213 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
214 {
 215 /* All matches, only compare the last item to get the right eflags. */
216 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
217 uSrc1AddrReg += cLeftPage * cbIncr;
218 uSrc2AddrReg += cLeftPage * cbIncr;
219 uCounterReg -= cLeftPage;
220 }
221 else
222 {
223 /* Some mismatch, compare each item (and keep volatile
224 memory in mind). */
225 uint32_t off = 0;
226 do
227 {
228 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
229 off++;
230 } while ( off < cLeftPage
231 && (uEFlags & X86_EFL_ZF));
232 uSrc1AddrReg += cbIncr * off;
233 uSrc2AddrReg += cbIncr * off;
234 uCounterReg -= off;
235 }
236
237 /* Update the registers before looping. */
238 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg;
239 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg;
240 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg;
241 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
242
243 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
244 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
245 if ( uCounterReg == 0
246 || !(uEFlags & X86_EFL_ZF))
247 break;
248 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
249 continue;
250 }
251 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
252 }
253 }
254
255 /*
256 * Fallback - slow processing till the end of the current page.
 257 * In the cross page boundary case we will end up here with cLeftPage
 258 * as 0, so we execute one round of the loop below.
259 */
260 do
261 {
262 OP_TYPE uValue1;
263 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg);
264 if (rcStrict != VINF_SUCCESS)
265 return rcStrict;
266 OP_TYPE uValue2;
267 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
268 if (rcStrict != VINF_SUCCESS)
269 return rcStrict;
270 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
271
272 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr;
273 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr;
274 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
275 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
276 cLeftPage--;
277 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
278 } while ( (int32_t)cLeftPage > 0
279 && (uEFlags & X86_EFL_ZF));
280
281 /*
282 * Next page? Must check for interrupts and stuff here.
283 */
284 if ( uCounterReg == 0
285 || !(uEFlags & X86_EFL_ZF))
286 break;
287 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
288 }
289
290 /*
291 * Done.
292 */
293 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
294 return VINF_SUCCESS;
295}
296
297
298/**
299 * Implements 'REPNE CMPS'.
300 */
301IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
302{
303 PVM pVM = pVCpu->CTX_SUFF(pVM);
304
305 /*
306 * Setup.
307 */
308 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
309 if (uCounterReg == 0)
310 {
311 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
312 return VINF_SUCCESS;
313 }
314
315 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
316
317 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pVCpu, iEffSeg);
 318 uint64_t uSrc1Base = 0; /* gcc may not be used uninitialized */
319 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
320 if (rcStrict != VINF_SUCCESS)
321 return rcStrict;
322
323 uint64_t uSrc2Base = 0; /* gcc may not be used uninitialized */
324 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uSrc2Base);
325 if (rcStrict != VINF_SUCCESS)
326 return rcStrict;
327
328 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
329 ADDR_TYPE uSrc1AddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
330 ADDR_TYPE uSrc2AddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
331 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
332
333 /*
334 * The loop.
335 */
336 for (;;)
337 {
338 /*
339 * Do segmentation and virtual page stuff.
340 */
341 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
342 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
343 uint32_t cLeftSrc1Page = (GUEST_PAGE_SIZE - (uVirtSrc1Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
344 if (cLeftSrc1Page > uCounterReg)
345 cLeftSrc1Page = uCounterReg;
346 uint32_t cLeftSrc2Page = (GUEST_PAGE_SIZE - (uVirtSrc2Addr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
347 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
348
349 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
350 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
351 && ( IS_64_BIT_CODE(pVCpu)
352 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
353 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
354 && uSrc2AddrReg < pVCpu->cpum.GstCtx.es.u32Limit
355 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
356 )
357 )
358 {
359 RTGCPHYS GCPhysSrc1Mem;
360 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
361 if (rcStrict != VINF_SUCCESS)
362 return rcStrict;
363
364 RTGCPHYS GCPhysSrc2Mem;
365 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
366 if (rcStrict != VINF_SUCCESS)
367 return rcStrict;
368
369 /*
370 * If we can map the page without trouble, do a block processing
371 * until the end of the current page.
372 */
373 OP_TYPE const *puSrc2Mem;
374 PGMPAGEMAPLOCK PgLockSrc2Mem;
375 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
376 if (rcStrict == VINF_SUCCESS)
377 {
378 OP_TYPE const *puSrc1Mem;
379 PGMPAGEMAPLOCK PgLockSrc1Mem;
380 rcStrict = iemMemPageMap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
381 if (rcStrict == VINF_SUCCESS)
382 {
383 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
384 {
385 /* All matches, only compare the last item to get the right eflags. */
386 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
387 uSrc1AddrReg += cLeftPage * cbIncr;
388 uSrc2AddrReg += cLeftPage * cbIncr;
389 uCounterReg -= cLeftPage;
390 }
391 else
392 {
393 /* Some mismatch, compare each item (and keep volatile
394 memory in mind). */
395 uint32_t off = 0;
396 do
397 {
398 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
399 off++;
400 } while ( off < cLeftPage
401 && !(uEFlags & X86_EFL_ZF));
402 uSrc1AddrReg += cbIncr * off;
403 uSrc2AddrReg += cbIncr * off;
404 uCounterReg -= off;
405 }
406
407 /* Update the registers before looping. */
408 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg;
409 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg;
410 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg;
411 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
412
413 iemMemPageUnmap(pVCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
414 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
415 if ( uCounterReg == 0
416 || (uEFlags & X86_EFL_ZF))
417 break;
418 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
419 continue;
420 }
421 iemMemPageUnmap(pVCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
422 }
423 }
424
425 /*
426 * Fallback - slow processing till the end of the current page.
 427 * In the cross page boundary case we will end up here with cLeftPage
 428 * as 0, so we execute one round of the loop below.
429 */
430 do
431 {
432 OP_TYPE uValue1;
433 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue1, iEffSeg, uSrc1AddrReg);
434 if (rcStrict != VINF_SUCCESS)
435 return rcStrict;
436 OP_TYPE uValue2;
437 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
438 if (rcStrict != VINF_SUCCESS)
439 return rcStrict;
440 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
441
442 pVCpu->cpum.GstCtx.ADDR_rSI = uSrc1AddrReg += cbIncr;
443 pVCpu->cpum.GstCtx.ADDR_rDI = uSrc2AddrReg += cbIncr;
444 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
445 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
446 cLeftPage--;
447 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
448 } while ( (int32_t)cLeftPage > 0
449 && !(uEFlags & X86_EFL_ZF));
450
451 /*
452 * Next page? Must check for interrupts and stuff here.
453 */
454 if ( uCounterReg == 0
455 || (uEFlags & X86_EFL_ZF))
456 break;
457 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
458 }
459
460 /*
461 * Done.
462 */
463 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
464 return VINF_SUCCESS;
465}
466
467
468/**
469 * Implements 'REPE SCAS'.
470 */
471IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
472{
473 PVM pVM = pVCpu->CTX_SUFF(pVM);
474
475 /*
476 * Setup.
477 */
478 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
479 if (uCounterReg == 0)
480 {
481 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
482 return VINF_SUCCESS;
483 }
484
485 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
486 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
487 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
488 if (rcStrict != VINF_SUCCESS)
489 return rcStrict;
490
491 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
492 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX;
493 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
494 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
495
496 /*
497 * The loop.
498 */
499 for (;;)
500 {
501 /*
502 * Do segmentation and virtual page stuff.
503 */
504 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
505 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
506 if (cLeftPage > uCounterReg)
507 cLeftPage = uCounterReg;
508 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
509 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
510 && ( IS_64_BIT_CODE(pVCpu)
511 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
512 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
513 )
514 )
515 {
516 RTGCPHYS GCPhysMem;
517 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
518 if (rcStrict != VINF_SUCCESS)
519 return rcStrict;
520
521 /*
522 * If we can map the page without trouble, do a block processing
523 * until the end of the current page.
524 */
525 PGMPAGEMAPLOCK PgLockMem;
526 OP_TYPE const *puMem;
527 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
528 if (rcStrict == VINF_SUCCESS)
529 {
530 /* Search till we find a mismatching item. */
531 OP_TYPE uTmpValue;
532 bool fQuit;
533 uint32_t i = 0;
534 do
535 {
536 uTmpValue = puMem[i++];
537 fQuit = uTmpValue != uValueReg;
538 } while (i < cLeftPage && !fQuit);
539
540 /* Update the regs. */
541 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
542 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
543 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
544 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
545 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
546 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
547 if ( fQuit
548 || uCounterReg == 0)
549 break;
550
551 /* If unaligned, we drop thru and do the page crossing access
552 below. Otherwise, do the next page. */
553 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
554 {
555 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
556 continue;
557 }
558 cLeftPage = 0;
559 }
560 }
561
562 /*
563 * Fallback - slow processing till the end of the current page.
 564 * In the cross page boundary case we will end up here with cLeftPage
 565 * as 0, so we execute one round of the loop below.
566 */
567 do
568 {
569 OP_TYPE uTmpValue;
570 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
571 if (rcStrict != VINF_SUCCESS)
572 return rcStrict;
573 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
574
575 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
576 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
577 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
578 cLeftPage--;
579 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || !(uEFlags & X86_EFL_ZF));
580 } while ( (int32_t)cLeftPage > 0
581 && (uEFlags & X86_EFL_ZF));
582
583 /*
584 * Next page? Must check for interrupts and stuff here.
585 */
586 if ( uCounterReg == 0
587 || !(uEFlags & X86_EFL_ZF))
588 break;
589 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
590 }
591
592 /*
593 * Done.
594 */
595 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
596 return VINF_SUCCESS;
597}
598
599
600/**
601 * Implements 'REPNE SCAS'.
602 */
603IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
604{
605 PVM pVM = pVCpu->CTX_SUFF(pVM);
606
607 /*
608 * Setup.
609 */
610 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
611 if (uCounterReg == 0)
612 {
613 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
614 return VINF_SUCCESS;
615 }
616
617 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
618 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
619 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
620 if (rcStrict != VINF_SUCCESS)
621 return rcStrict;
622
623 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
624 OP_TYPE const uValueReg = pVCpu->cpum.GstCtx.OP_rAX;
625 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
626 uint32_t uEFlags = pVCpu->cpum.GstCtx.eflags.u;
627
628 /*
629 * The loop.
630 */
631 for (;;)
632 {
633 /*
634 * Do segmentation and virtual page stuff.
635 */
636 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
637 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
638 if (cLeftPage > uCounterReg)
639 cLeftPage = uCounterReg;
640 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
641 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
642 && ( IS_64_BIT_CODE(pVCpu)
643 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
644 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
645 )
646 )
647 {
648 RTGCPHYS GCPhysMem;
649 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
650 if (rcStrict != VINF_SUCCESS)
651 return rcStrict;
652
653 /*
654 * If we can map the page without trouble, do a block processing
655 * until the end of the current page.
656 */
657 PGMPAGEMAPLOCK PgLockMem;
658 OP_TYPE const *puMem;
659 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
660 if (rcStrict == VINF_SUCCESS)
661 {
662 /* Search till we find a mismatching item. */
663 OP_TYPE uTmpValue;
664 bool fQuit;
665 uint32_t i = 0;
666 do
667 {
668 uTmpValue = puMem[i++];
669 fQuit = uTmpValue == uValueReg;
670 } while (i < cLeftPage && !fQuit);
671
672 /* Update the regs. */
673 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
674 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= i;
675 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += i * cbIncr;
676 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
677 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
678 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
679 if ( fQuit
680 || uCounterReg == 0)
681 break;
682
683 /* If unaligned, we drop thru and do the page crossing access
684 below. Otherwise, do the next page. */
685 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
686 {
687 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
688 continue;
689 }
690 cLeftPage = 0;
691 }
692 }
693
694 /*
695 * Fallback - slow processing till the end of the current page.
 696 * In the cross page boundary case we will end up here with cLeftPage
 697 * as 0, so we execute one round of the loop below.
698 */
699 do
700 {
701 OP_TYPE uTmpValue;
702 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
703 if (rcStrict != VINF_SUCCESS)
704 return rcStrict;
705 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
706 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
707 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
708 pVCpu->cpum.GstCtx.eflags.u = uEFlags;
709 cLeftPage--;
710 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0 || (uEFlags & X86_EFL_ZF));
711 } while ( (int32_t)cLeftPage > 0
712 && !(uEFlags & X86_EFL_ZF));
713
714 /*
715 * Next page? Must check for interrupts and stuff here.
716 */
717 if ( uCounterReg == 0
718 || (uEFlags & X86_EFL_ZF))
719 break;
720 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, uEFlags);
721 }
722
723 /*
724 * Done.
725 */
726 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
727 return VINF_SUCCESS;
728}
729
730
731
732
733/**
734 * Implements 'REP MOVS'.
735 */
736IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
737{
738 PVM pVM = pVCpu->CTX_SUFF(pVM);
739
740 /*
741 * Setup.
742 */
743 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
744 if (uCounterReg == 0)
745 {
746 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
747 return VINF_SUCCESS;
748 }
749
750 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg) | CPUMCTX_EXTRN_ES);
751
752 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
753 uint64_t uSrcBase = 0; /* gcc may not be used uninitialized */
754 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uSrcBase);
755 if (rcStrict != VINF_SUCCESS)
756 return rcStrict;
757
758 uint64_t uDstBase = 0; /* gcc may not be used uninitialized */
759 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uDstBase);
760 if (rcStrict != VINF_SUCCESS)
761 return rcStrict;
762
763 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
764 ADDR_TYPE uSrcAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
765 ADDR_TYPE uDstAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
766
767 /*
768 * Be careful with handle bypassing.
769 */
770 if (pVCpu->iem.s.fBypassHandlers)
771 {
772 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
773 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
774 }
775
776 /*
777 * The loop.
778 */
779 for (;;)
780 {
781 /*
782 * Do segmentation and virtual page stuff.
783 */
784 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
785 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
786 uint32_t cLeftSrcPage = (GUEST_PAGE_SIZE - (uVirtSrcAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
787 if (cLeftSrcPage > uCounterReg)
788 cLeftSrcPage = uCounterReg;
789 uint32_t cLeftDstPage = (GUEST_PAGE_SIZE - (uVirtDstAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
790 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
791
792 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
793 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
794 && ( IS_64_BIT_CODE(pVCpu)
795 || ( uSrcAddrReg < pSrcHid->u32Limit
796 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
797 && uDstAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
798 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
799 )
800 )
801 {
802 RTGCPHYS GCPhysSrcMem;
803 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
804 if (rcStrict != VINF_SUCCESS)
805 return rcStrict;
806
807 RTGCPHYS GCPhysDstMem;
808 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
809 if (rcStrict != VINF_SUCCESS)
810 return rcStrict;
811
812 /*
813 * If we can map the page without trouble, do a block processing
814 * until the end of the current page.
815 */
816 PGMPAGEMAPLOCK PgLockDstMem;
817 OP_TYPE *puDstMem;
818 rcStrict = iemMemPageMap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
819 if (rcStrict == VINF_SUCCESS)
820 {
821 PGMPAGEMAPLOCK PgLockSrcMem;
822 OP_TYPE const *puSrcMem;
823 rcStrict = iemMemPageMap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
824 if (rcStrict == VINF_SUCCESS)
825 {
826 Assert( (GCPhysSrcMem >> GUEST_PAGE_SHIFT) != (GCPhysDstMem >> GUEST_PAGE_SHIFT)
827 || ((uintptr_t)puSrcMem >> GUEST_PAGE_SHIFT) == ((uintptr_t)puDstMem >> GUEST_PAGE_SHIFT));
828
829 /* Perform the operation exactly (don't use memcpy to avoid
830 having to consider how its implementation would affect
831 any overlapping source and destination area). */
832 OP_TYPE const *puSrcCur = puSrcMem;
833 OP_TYPE *puDstCur = puDstMem;
834 uint32_t cTodo = cLeftPage;
835 while (cTodo-- > 0)
836 *puDstCur++ = *puSrcCur++;
837
838 /* Update the registers. */
839 pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
840 pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
841 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
842
843 iemMemPageUnmap(pVCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
844 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
845
846 if (uCounterReg == 0)
847 break;
848 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
849 continue;
850 }
851 iemMemPageUnmap(pVCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
852 }
853 }
854
855 /*
856 * Fallback - slow processing till the end of the current page.
 857 * In the cross page boundary case we will end up here with cLeftPage
 858 * as 0, so we execute one round of the loop below.
859 */
860 do
861 {
862 OP_TYPE uValue;
863 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uSrcAddrReg);
864 if (rcStrict != VINF_SUCCESS)
865 return rcStrict;
866 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uDstAddrReg, uValue);
867 if (rcStrict != VINF_SUCCESS)
868 return rcStrict;
869
870 pVCpu->cpum.GstCtx.ADDR_rSI = uSrcAddrReg += cbIncr;
871 pVCpu->cpum.GstCtx.ADDR_rDI = uDstAddrReg += cbIncr;
872 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
873 cLeftPage--;
874 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
875 } while ((int32_t)cLeftPage > 0);
876
877 /*
878 * Next page. Must check for interrupts and stuff here.
879 */
880 if (uCounterReg == 0)
881 break;
882 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
883 }
884
885 /*
886 * Done.
887 */
888 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
889 return VINF_SUCCESS;
890}
891
892
893/**
894 * Implements 'REP STOS'.
895 */
896IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
897{
898 PVM pVM = pVCpu->CTX_SUFF(pVM);
899
900 /*
901 * Setup.
902 */
903 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
904 if (uCounterReg == 0)
905 {
906 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
907 return VINF_SUCCESS;
908 }
909
910 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES);
911
912 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
913 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
914 if (rcStrict != VINF_SUCCESS)
915 return rcStrict;
916
917 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
918 OP_TYPE const uValue = pVCpu->cpum.GstCtx.OP_rAX;
919 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
920
921 /*
922 * Be careful with handle bypassing.
923 */
924 /** @todo Permit doing a page if correctly aligned. */
925 if (pVCpu->iem.s.fBypassHandlers)
926 {
927 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
928 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
929 }
930
931 /*
932 * The loop.
933 */
934 for (;;)
935 {
936 /*
937 * Do segmentation and virtual page stuff.
938 */
939 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
940 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
941 if (cLeftPage > uCounterReg)
942 cLeftPage = uCounterReg;
943 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
944 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
945 && ( IS_64_BIT_CODE(pVCpu)
946 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
947 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
948 )
949 )
950 {
951 RTGCPHYS GCPhysMem;
952 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
953 if (rcStrict != VINF_SUCCESS)
954 return rcStrict;
955
956 /*
957 * If we can map the page without trouble, do a block processing
958 * until the end of the current page.
959 */
960 PGMPAGEMAPLOCK PgLockMem;
961 OP_TYPE *puMem;
962 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
963 if (rcStrict == VINF_SUCCESS)
964 {
965 /* Update the regs first so we can loop on cLeftPage. */
966 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
967 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
968
969 /* Do the memsetting. */
970#if OP_SIZE == 8
971 memset(puMem, uValue, cLeftPage);
972/*#elif OP_SIZE == 32
973 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
974#else
975 while (cLeftPage-- > 0)
976 *puMem++ = uValue;
977#endif
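            /* memset() can only be used for the byte-sized variant; the wider
               element sizes are filled one element at a time above (the
               commented-out ASMMemFill32 alternative would cover the 32-bit
               case). */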
978
979 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
980
981 if (uCounterReg == 0)
982 break;
983
984 /* If unaligned, we drop thru and do the page crossing access
985 below. Otherwise, do the next page. */
986 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
987 {
988 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
989 continue;
990 }
991 cLeftPage = 0;
992 }
993 /* If we got an invalid physical address in the page table, just skip
994 ahead to the next page or the counter reaches zero. This crazy
995 optimization is for a buggy EFI firmware that's driving me nuts. */
996 else if (rcStrict == VERR_PGM_PHYS_TLB_UNASSIGNED)
997 {
998 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
999 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
1000 if (uCounterReg == 0)
1001 break;
1002 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1003 {
1004 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1005 continue;
1006 }
1007 }
1008 }
1009
1010 /*
1011 * Fallback - slow processing till the end of the current page.
 1012 * In the cross page boundary case we will end up here with cLeftPage
 1013 * as 0, so we execute one round of the loop below.
1014 */
1015 do
1016 {
1017 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pVCpu, X86_SREG_ES, uAddrReg, uValue);
1018 if (rcStrict != VINF_SUCCESS)
1019 return rcStrict;
1020 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
1021 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1022 cLeftPage--;
1023 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1024 } while ((int32_t)cLeftPage > 0);
1025
1026 /*
1027 * Next page. Must check for interrupts and stuff here.
1028 */
1029 if (uCounterReg == 0)
1030 break;
1031 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1032 }
1033
1034 /*
1035 * Done.
1036 */
1037 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1038 return VINF_SUCCESS;
1039}
1040
1041
1042/**
1043 * Implements 'REP LODS'.
1044 */
1045IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
1046{
1047 PVM pVM = pVCpu->CTX_SUFF(pVM);
1048
1049 /*
1050 * Setup.
1051 */
1052 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1053 if (uCounterReg == 0)
1054 {
1055 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1056 return VINF_SUCCESS;
1057 }
1058
1059 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iEffSeg));
1060 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pVCpu, iEffSeg);
1061 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
1062 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pSrcHid, iEffSeg, &uBaseAddr);
1063 if (rcStrict != VINF_SUCCESS)
1064 return rcStrict;
1065
1066 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1067 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
1068
1069 /*
1070 * The loop.
1071 */
1072 for (;;)
1073 {
1074 /*
1075 * Do segmentation and virtual page stuff.
1076 */
1077 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1078 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1079 if (cLeftPage > uCounterReg)
1080 cLeftPage = uCounterReg;
1081 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1082 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1083 && ( IS_64_BIT_CODE(pVCpu)
1084 || ( uAddrReg < pSrcHid->u32Limit
1085 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
1086 )
1087 )
1088 {
1089 RTGCPHYS GCPhysMem;
1090 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1091 if (rcStrict != VINF_SUCCESS)
1092 return rcStrict;
1093
1094 /*
1095 * If we can map the page without trouble, we can get away with
1096 * just reading the last value on the page.
1097 */
1098 PGMPAGEMAPLOCK PgLockMem;
1099 OP_TYPE const *puMem;
1100 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1101 if (rcStrict == VINF_SUCCESS)
1102 {
1103 /* Only get the last byte, the rest doesn't matter in direct access mode. */
1104#if OP_SIZE == 32
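            /* Assigning the full 64-bit rax (rather than eax) clears the upper
               half, matching the x86-64 rule that 32-bit register writes are
               zero-extended; the other operand sizes store through OP_rAX
               (al/ax/rax) directly. */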
1105 pVCpu->cpum.GstCtx.rax = puMem[cLeftPage - 1];
1106#else
1107 pVCpu->cpum.GstCtx.OP_rAX = puMem[cLeftPage - 1];
1108#endif
1109 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cLeftPage;
1110 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
1111 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1112
1113 if (uCounterReg == 0)
1114 break;
1115
1116 /* If unaligned, we drop thru and do the page crossing access
1117 below. Otherwise, do the next page. */
1118 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1119 {
1120 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1121 continue;
1122 }
1123 cLeftPage = 0;
1124 }
1125 }
1126
1127 /*
1128 * Fallback - slow processing till the end of the current page.
 1129 * In the cross page boundary case we will end up here with cLeftPage
 1130 * as 0, so we execute one round of the loop below.
1131 */
1132 do
1133 {
1134 OP_TYPE uTmpValue;
1135 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uTmpValue, iEffSeg, uAddrReg);
1136 if (rcStrict != VINF_SUCCESS)
1137 return rcStrict;
1138#if OP_SIZE == 32
1139 pVCpu->cpum.GstCtx.rax = uTmpValue;
1140#else
1141 pVCpu->cpum.GstCtx.OP_rAX = uTmpValue;
1142#endif
1143 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
1144 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1145 cLeftPage--;
1146 IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1147 } while ((int32_t)cLeftPage > 0);
1148
1149 if (rcStrict != VINF_SUCCESS)
1150 break;
1151
1152 /*
1153 * Next page. Must check for interrupts and stuff here.
1154 */
1155 if (uCounterReg == 0)
1156 break;
1157 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1158 }
1159
1160 /*
1161 * Done.
1162 */
1163 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1164 return VINF_SUCCESS;
1165}
1166
1167
1168#if OP_SIZE != 64
1169
1170/**
1171 * Implements 'INS' (no rep)
1172 */
1173IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1174{
1175 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1176 VBOXSTRICTRC rcStrict;
1177
1178 /*
1179 * Be careful with handle bypassing.
1180 */
1181 if (pVCpu->iem.s.fBypassHandlers)
1182 {
1183 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1184 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1185 }
1186
1187 /*
1188 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1189 * segmentation and finally any #PF due to virtual address translation.
1190 * ASSUMES nothing is read from the I/O port before traps are taken.
1191 */
1192 if (!fIoChecked)
1193 {
1194 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
1195 if (rcStrict != VINF_SUCCESS)
1196 return rcStrict;
1197 }
1198
1199 /*
1200 * Check nested-guest I/O intercepts.
1201 */
1202#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1203 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1204 {
1205 VMXEXITINSTRINFO ExitInstrInfo;
1206 ExitInstrInfo.u = 0;
1207 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1208 ExitInstrInfo.StrIo.iSegReg = X86_SREG_ES;
1209 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_INS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, false /* fRep */,
1210 ExitInstrInfo, cbInstr);
1211 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1212 return rcStrict;
1213 }
1214#endif
1215
1216#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1217 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1218 {
1219 rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES,
1220 false /* fRep */, true /* fStrIo */, cbInstr);
1221 if (rcStrict == VINF_SVM_VMEXIT)
1222 return VINF_SUCCESS;
1223 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1224 {
1225 Log(("iemCImpl_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx,
1226 OP_SIZE / 8, VBOXSTRICTRC_VAL(rcStrict)));
1227 return rcStrict;
1228 }
1229 }
1230#endif
1231
1232 OP_TYPE *puMem;
1233 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pVCpu->cpum.GstCtx.ADDR_rDI,
1234 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
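 /* The trailing OP_SIZE / 8 - 1 argument is presumably the natural-alignment
    mask that goes with the #AC(0)/#GP(0) alignment-check work mentioned in the
    changeset note at the top of this page. */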
1235 if (rcStrict != VINF_SUCCESS)
1236 return rcStrict;
1237
1238 uint32_t u32Value = 0;
1239 rcStrict = IOMIOPortRead(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, &u32Value, OP_SIZE / 8);
1240 if (IOM_SUCCESS(rcStrict))
1241 {
1242 *puMem = (OP_TYPE)u32Value;
1243# ifdef IN_RING3
1244 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
1245# else
1246 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
1247# endif
1248 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1249 {
1250 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
1251 pVCpu->cpum.GstCtx.ADDR_rDI += OP_SIZE / 8;
1252 else
1253 pVCpu->cpum.GstCtx.ADDR_rDI -= OP_SIZE / 8;
1254 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1255 }
1256 else
1257 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), RT_FAILURE_NP(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1258 }
1259 return rcStrict;
1260}
1261
1262
1263/**
1264 * Implements 'REP INS'.
1265 */
1266IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1267{
1268 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1269
1270 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_TR);
1271
1272 /*
1273 * Setup.
1274 */
1275 uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
1276 VBOXSTRICTRC rcStrict;
1277 if (!fIoChecked)
1278 {
1279/** @todo check if this is too early for ecx=0. */
1280 rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
1281 if (rcStrict != VINF_SUCCESS)
1282 return rcStrict;
1283 }
1284
1285 /*
1286 * Check nested-guest I/O intercepts.
1287 */
1288#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1289 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1290 {
1291 VMXEXITINSTRINFO ExitInstrInfo;
1292 ExitInstrInfo.u = 0;
1293 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1294 ExitInstrInfo.StrIo.iSegReg = X86_SREG_ES;
1295 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_INS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, true /* fRep */,
1296 ExitInstrInfo, cbInstr);
1297 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1298 return rcStrict;
1299 }
1300#endif
1301
1302#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1303 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1304 {
1305 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, OP_SIZE / 8, ADDR_SIZE, X86_SREG_ES, true /* fRep */,
1306 true /* fStrIo */, cbInstr);
1307 if (rcStrict == VINF_SVM_VMEXIT)
1308 return VINF_SUCCESS;
1309 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1310 {
1311 Log(("iemCImpl_rep_ins_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
1312 VBOXSTRICTRC_VAL(rcStrict)));
1313 return rcStrict;
1314 }
1315 }
1316#endif
1317
1318 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1319 if (uCounterReg == 0)
1320 {
1321 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1322 return VINF_SUCCESS;
1323 }
1324
1325 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
1326 rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegUpdateHid(pVCpu, &pVCpu->cpum.GstCtx.es), X86_SREG_ES, &uBaseAddr);
1327 if (rcStrict != VINF_SUCCESS)
1328 return rcStrict;
1329
1330 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1331 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rDI;
1332
1333 /*
1334 * Be careful with handle bypassing.
1335 */
1336 if (pVCpu->iem.s.fBypassHandlers)
1337 {
1338 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1339 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1340 }
1341
1342 /*
1343 * The loop.
1344 */
1345 for (;;)
1346 {
1347 /*
1348 * Do segmentation and virtual page stuff.
1349 */
1350 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1351 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1352 if (cLeftPage > uCounterReg)
1353 cLeftPage = uCounterReg;
1354 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1355 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1356 && ( IS_64_BIT_CODE(pVCpu)
1357 || ( uAddrReg < pVCpu->cpum.GstCtx.es.u32Limit
1358 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pVCpu->cpum.GstCtx.es.u32Limit)
1359 )
1360 )
1361 {
1362 RTGCPHYS GCPhysMem;
1363 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1364 if (rcStrict != VINF_SUCCESS)
1365 return rcStrict;
1366
1367 /*
1368 * If we can map the page without trouble, use the IOM
1369 * string I/O interface to do the work.
1370 */
1371 PGMPAGEMAPLOCK PgLockMem;
1372 OP_TYPE *puMem;
1373 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1374 if (rcStrict == VINF_SUCCESS)
1375 {
1376 uint32_t cTransfers = cLeftPage;
1377 rcStrict = IOMIOPortReadString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1378
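                /* On return, cTransfers holds the number of elements that were
                   NOT transferred, so the difference below is the count actually
                   completed; any non-VINF_SUCCESS (but IOM-successful) status is
                   passed up after the registers have been advanced. */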
1379 uint32_t cActualTransfers = cLeftPage - cTransfers;
1380 Assert(cActualTransfers <= cLeftPage);
1381 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
1382 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
1383 puMem += cActualTransfers;
1384
1385 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1386
1387 if (rcStrict != VINF_SUCCESS)
1388 {
1389 if (IOM_SUCCESS(rcStrict))
1390 {
1391 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1392 if (uCounterReg == 0)
1393 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1394 }
1395 return rcStrict;
1396 }
1397
1398 /* If unaligned, we drop thru and do the page crossing access
1399 below. Otherwise, do the next page. */
1400 if (uCounterReg == 0)
1401 break;
1402 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1403 {
1404 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1405 continue;
1406 }
1407 cLeftPage = 0;
1408 }
1409 }
1410
1411 /*
1412 * Fallback - slow processing till the end of the current page.
 1413 * In the cross page boundary case we will end up here with cLeftPage
 1414 * as 0, so we execute one round of the loop below.
 1415 *
 1416 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1417 * I/O port, otherwise it wouldn't really be restartable.
1418 */
1419 /** @todo investigate what the CPU actually does with \#PF/\#GP
1420 * during INS. */
1421 do
1422 {
1423 OP_TYPE *puMem;
1424 rcStrict = iemMemMap(pVCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg,
1425 IEM_ACCESS_DATA_W, OP_SIZE / 8 - 1);
1426 if (rcStrict != VINF_SUCCESS)
1427 return rcStrict;
1428
1429 uint32_t u32Value = 0;
1430 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1431 if (!IOM_SUCCESS(rcStrict))
1432 {
1433 iemMemRollback(pVCpu);
1434 return rcStrict;
1435 }
1436
1437 *puMem = (OP_TYPE)u32Value;
1438# ifdef IN_RING3
1439 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pVCpu, puMem, IEM_ACCESS_DATA_W);
1440# else
1441 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, puMem, IEM_ACCESS_DATA_W);
1442# endif
1443 if (rcStrict2 == VINF_SUCCESS)
1444 { /* likely */ }
1445 else
1446 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1447 RT_FAILURE(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1448
1449 pVCpu->cpum.GstCtx.ADDR_rDI = uAddrReg += cbIncr;
1450 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1451
1452 cLeftPage--;
1453 if (rcStrict != VINF_SUCCESS)
1454 {
1455 if (uCounterReg == 0)
1456 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1457 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1458 return rcStrict;
1459 }
1460
1461 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1462 } while ((int32_t)cLeftPage > 0);
1463
1464
1465 /*
1466 * Next page. Must check for interrupts and stuff here.
1467 */
1468 if (uCounterReg == 0)
1469 break;
1470 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1471 }
1472
1473 /*
1474 * Done.
1475 */
1476 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1477 return VINF_SUCCESS;
1478}
1479
1480
1481/**
1482 * Implements 'OUTS' (no rep)
1483 */
1484IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1485{
1486 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1487 VBOXSTRICTRC rcStrict;
1488
1489 /*
1490 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1491 * segmentation and finally any #PF due to virtual address translation.
1492 * ASSUMES nothing is read from the I/O port before traps are taken.
1493 */
1494 if (!fIoChecked)
1495 {
1496 rcStrict = iemHlpCheckPortIOPermission(pVCpu, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8);
1497 if (rcStrict != VINF_SUCCESS)
1498 return rcStrict;
1499 }
1500
1501 /*
1502 * Check nested-guest I/O intercepts.
1503 */
1504#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1505 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1506 {
1507 VMXEXITINSTRINFO ExitInstrInfo;
1508 ExitInstrInfo.u = 0;
1509 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1510 ExitInstrInfo.StrIo.iSegReg = iEffSeg;
1511 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_OUTS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, false /* fRep */,
1512 ExitInstrInfo, cbInstr);
1513 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1514 return rcStrict;
1515 }
1516#endif
1517
1518#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1519 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1520 {
1521 rcStrict = iemSvmHandleIOIntercept(pVCpu, pVCpu->cpum.GstCtx.dx, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg,
1522 false /* fRep */, true /* fStrIo */, cbInstr);
1523 if (rcStrict == VINF_SVM_VMEXIT)
1524 return VINF_SUCCESS;
1525 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1526 {
1527 Log(("iemCImpl_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", pVCpu->cpum.GstCtx.dx,
1528 OP_SIZE / 8, VBOXSTRICTRC_VAL(rcStrict)));
1529 return rcStrict;
1530 }
1531 }
1532#endif
1533
1534 OP_TYPE uValue;
1535 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, pVCpu->cpum.GstCtx.ADDR_rSI);
1536 if (rcStrict == VINF_SUCCESS)
1537 {
1538 rcStrict = IOMIOPortWrite(pVM, pVCpu, pVCpu->cpum.GstCtx.dx, uValue, OP_SIZE / 8);
1539 if (IOM_SUCCESS(rcStrict))
1540 {
1541 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1DF)
1542 pVCpu->cpum.GstCtx.ADDR_rSI += OP_SIZE / 8;
1543 else
1544 pVCpu->cpum.GstCtx.ADDR_rSI -= OP_SIZE / 8;
1545 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1546 if (rcStrict != VINF_SUCCESS)
1547 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1548 }
1549 }
1550 return rcStrict;
1551}
1552
1553
1554/**
1555 * Implements 'REP OUTS'.
1556 */
1557IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1558{
1559 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1560
1561 /*
1562 * Setup.
1563 */
1564 uint16_t const u16Port = pVCpu->cpum.GstCtx.dx;
1565 VBOXSTRICTRC rcStrict;
1566 if (!fIoChecked)
1567 {
1568/** @todo check if this is too early for ecx=0. */
1569 rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, OP_SIZE / 8);
1570 if (rcStrict != VINF_SUCCESS)
1571 return rcStrict;
1572 }
1573
1574 /*
1575 * Check nested-guest I/O intercepts.
1576 */
1577#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1578 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1579 {
1580 VMXEXITINSTRINFO ExitInstrInfo;
1581 ExitInstrInfo.u = 0;
1582 ExitInstrInfo.StrIo.u3AddrSize = ADDR_VMXSTRIO;
1583 ExitInstrInfo.StrIo.iSegReg = iEffSeg;
1584 rcStrict = iemVmxVmexitInstrStrIo(pVCpu, VMXINSTRID_IO_OUTS, pVCpu->cpum.GstCtx.dx, OP_SIZE / 8, true /* fRep */,
1585 ExitInstrInfo, cbInstr);
1586 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1587 return rcStrict;
1588 }
1589#endif
1590
1591#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1592 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
1593 {
1594 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, OP_SIZE / 8, ADDR_SIZE, iEffSeg, true /* fRep */,
1595 true /* fStrIo */, cbInstr);
1596 if (rcStrict == VINF_SVM_VMEXIT)
1597 return VINF_SUCCESS;
1598 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
1599 {
1600 Log(("iemCImpl_rep_outs_op: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, OP_SIZE / 8,
1601 VBOXSTRICTRC_VAL(rcStrict)));
1602 return rcStrict;
1603 }
1604 }
1605#endif
1606
1607 ADDR_TYPE uCounterReg = pVCpu->cpum.GstCtx.ADDR_rCX;
1608 if (uCounterReg == 0)
1609 {
1610 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1611 return VINF_SUCCESS;
1612 }
1613
1614 PCCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iEffSeg);
1615 uint64_t uBaseAddr = 0; /* gcc may not be used uninitialized */
1616 rcStrict = iemMemSegCheckReadAccessEx(pVCpu, pHid, iEffSeg, &uBaseAddr);
1617 if (rcStrict != VINF_SUCCESS)
1618 return rcStrict;
1619
1620 int8_t const cbIncr = pVCpu->cpum.GstCtx.eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1621 ADDR_TYPE uAddrReg = pVCpu->cpum.GstCtx.ADDR_rSI;
1622
1623 /*
1624 * The loop.
1625 */
1626 for (;;)
1627 {
1628 /*
1629 * Do segmentation and virtual page stuff.
1630 */
1631 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1632 uint32_t cLeftPage = (GUEST_PAGE_SIZE - (uVirtAddr & GUEST_PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1633 if (cLeftPage > uCounterReg)
1634 cLeftPage = uCounterReg;
1635 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1636 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1637 && ( IS_64_BIT_CODE(pVCpu)
1638 || ( uAddrReg < pHid->u32Limit
1639 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1640 )
1641 )
1642 {
1643 RTGCPHYS GCPhysMem;
1644 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1645 if (rcStrict != VINF_SUCCESS)
1646 return rcStrict;
1647
1648 /*
1649 * If we can map the page without trouble, we use the IOM
1650 * string I/O interface to do the job.
1651 */
1652 PGMPAGEMAPLOCK PgLockMem;
1653 OP_TYPE const *puMem;
1654 rcStrict = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1655 if (rcStrict == VINF_SUCCESS)
1656 {
1657 uint32_t cTransfers = cLeftPage;
1658 rcStrict = IOMIOPortWriteString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1659
1660 uint32_t cActualTransfers = cLeftPage - cTransfers;
1661 Assert(cActualTransfers <= cLeftPage);
1662 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
1663 pVCpu->cpum.GstCtx.ADDR_rCX = uCounterReg -= cActualTransfers;
1664 puMem += cActualTransfers;
1665
1666 iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1667
1668 if (rcStrict != VINF_SUCCESS)
1669 {
1670 if (IOM_SUCCESS(rcStrict))
1671 {
1672 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1673 if (uCounterReg == 0)
1674 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1675 }
1676 return rcStrict;
1677 }
1678
1679 if (uCounterReg == 0)
1680 break;
1681
1682 /* If unaligned, we drop thru and do the page crossing access
1683 below. Otherwise, do the next page. */
1684 if (!(uVirtAddr & (OP_SIZE / 8 - 1)))
1685 {
1686 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1687 continue;
1688 }
1689 cLeftPage = 0;
1690 }
1691 }
1692
1693 /*
1694 * Fallback - slow processing till the end of the current page.
 1695 * In the cross page boundary case we will end up here with cLeftPage
 1696 * as 0, so we execute one round of the loop below.
 1697 *
 1698 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1699 * I/O port, otherwise it wouldn't really be restartable.
1700 */
1701 /** @todo investigate what the CPU actually does with \#PF/\#GP
 1702 * during OUTS. */
1703 do
1704 {
1705 OP_TYPE uValue;
1706 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pVCpu, &uValue, iEffSeg, uAddrReg);
1707 if (rcStrict != VINF_SUCCESS)
1708 return rcStrict;
1709
1710 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1711 if (IOM_SUCCESS(rcStrict))
1712 {
1713 pVCpu->cpum.GstCtx.ADDR_rSI = uAddrReg += cbIncr;
1714 pVCpu->cpum.GstCtx.ADDR_rCX = --uCounterReg;
1715 cLeftPage--;
1716 }
1717 if (rcStrict != VINF_SUCCESS)
1718 {
1719 if (IOM_SUCCESS(rcStrict))
1720 {
1721 if (uCounterReg == 0)
1722 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1723 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1724 }
1725 return rcStrict;
1726 }
1727 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, uCounterReg == 0);
1728 } while ((int32_t)cLeftPage > 0);
1729
1730
1731 /*
1732 * Next page. Must check for interrupts and stuff here.
1733 */
1734 if (uCounterReg == 0)
1735 break;
1736 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pVCpu->cpum.GstCtx.eflags.u);
1737 }
1738
1739 /*
1740 * Done.
1741 */
1742 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1743 return VINF_SUCCESS;
1744}
1745
1746#endif /* OP_SIZE != 64 */
1747
1748
1749#undef OP_rAX
1750#undef OP_SIZE
1751#undef ADDR_SIZE
1752#undef ADDR_rDI
1753#undef ADDR_rSI
1754#undef ADDR_rCX
1755#undef ADDR_rIP
1756#undef ADDR2_TYPE
1757#undef ADDR_TYPE
1758#undef ADDR2_TYPE
1759#undef ADDR_VMXSTRIO
1760#undef IS_64_BIT_CODE
1761#undef IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
1762#undef IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1763#undef IEM_CHECK_FF_CPU_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1764