VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@51906

Last change on this file since 51906 was 49271, checked in by vboxsync, 11 years ago

IEM: log the rc on VERR_IEM_IPE_1.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 53.0 KB
/* $Id: IEMAllCImplStrInstr.cpp.h 49271 2013-10-24 10:50:57Z vboxsync $ */
/** @file
 * IEM - String Instruction Implementation Code Template.
 */

/*
 * Copyright (C) 2011-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if OP_SIZE == 8
# define OP_rAX     al
#elif OP_SIZE == 16
# define OP_rAX     ax
#elif OP_SIZE == 32
# define OP_rAX     eax
#elif OP_SIZE == 64
# define OP_rAX     rax
#else
# error "Bad OP_SIZE."
#endif
#define OP_TYPE     RT_CONCAT3(uint,OP_SIZE,_t)

#if ADDR_SIZE == 16
# define ADDR_rDI   di
# define ADDR_rSI   si
# define ADDR_rCX   cx
# define ADDR2_TYPE uint32_t
#elif ADDR_SIZE == 32
# define ADDR_rDI   edi
# define ADDR_rSI   esi
# define ADDR_rCX   ecx
# define ADDR2_TYPE uint32_t
#elif ADDR_SIZE == 64
# define ADDR_rDI   rdi
# define ADDR_rSI   rsi
# define ADDR_rCX   rcx
# define ADDR2_TYPE uint64_t
# define IS_64_BIT_CODE(a_pIemCpu) (true)
#else
# error "Bad ADDR_SIZE."
#endif
#define ADDR_TYPE   RT_CONCAT3(uint,ADDR_SIZE,_t)

#if ADDR_SIZE == 64 || OP_SIZE == 64
# define IS_64_BIT_CODE(a_pIemCpu) (true)
#elif ADDR_SIZE == 32
# define IS_64_BIT_CODE(a_pIemCpu) ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT)
#else
# define IS_64_BIT_CODE(a_pIemCpu) (false)
#endif


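/*
 * Illustration (not part of the original source): instantiated with
 * OP_SIZE=16 and ADDR_SIZE=32, the template macros above expand to
 *      OP_rAX     -> ax        OP_TYPE    -> uint16_t
 *      ADDR_rSI   -> esi       ADDR_TYPE  -> uint32_t
 *      ADDR_rCX   -> ecx       ADDR2_TYPE -> uint32_t
 *      IS_64_BIT_CODE(p)       -> ((p)->enmCpuMode == IEMMODE_64BIT)
 * so RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE) below names
 * the function iemCImpl_repe_cmps_op16_addr32.
 */
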
/**
 * Implements 'REPE CMPS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
    uint64_t        uSrc1Base;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint64_t uSrc2Base;
    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

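    /* The direction flag selects the stride sign: e.g. DF=1 with OP_SIZE==32
       gives cbIncr == -4, so SI/DI walk downwards through memory. */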
    int8_t const    cbIncr       = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE       uSrc1AddrReg = pCtx->ADDR_rSI;
    ADDR_TYPE       uSrc2AddrReg = pCtx->ADDR_rDI;
    uint32_t        uEFlags      = pCtx->eflags.u;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE  uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
        ADDR2_TYPE  uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
        uint32_t    cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftSrc1Page > uCounterReg)
            cLeftSrc1Page = uCounterReg;
        uint32_t    cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        uint32_t    cLeftPage     = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
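        /* Worked example: with a 0x1000 byte page, uVirtSrc1Addr == 0x8ffc and
           OP_SIZE == 16 give (0x1000 - 0xffc) / 2 == 2 whole items before the
           page boundary; the counter register then caps the batch size. */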

        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Optimize reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uSrc1AddrReg < pSrc1Hid->u32Limit
                    && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
                    && uSrc2AddrReg < pCtx->es.u32Limit
                    && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysSrc1Mem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            RTGCPHYS GCPhysSrc2Mem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockSrc2Mem;
            OP_TYPE const *puSrc2Mem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
            if (rcStrict == VINF_SUCCESS)
            {
                PGMPAGEMAPLOCK PgLockSrc1Mem;
                OP_TYPE const *puSrc1Mem;
                rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
                if (rcStrict == VINF_SUCCESS)
                {
                    if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
                    {
                        /* All matches, only compare the last item to get the right eflags. */
                        RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
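                        /* Redoing only the last compare is sufficient: EFLAGS
                           reflect just the most recent CMP, and since every
                           pair on the page matched, ZF stayed set and the loop
                           could not have terminated earlier. */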
                        uSrc1AddrReg += cLeftPage * cbIncr;
                        uSrc2AddrReg += cLeftPage * cbIncr;
                        uCounterReg  -= cLeftPage;
                    }
                    else
                    {
                        /* Some mismatch, compare each item (and keep volatile
                           memory in mind). */
                        uint32_t off = 0;
                        do
                        {
                            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
                            off++;
                        } while (   off < cLeftPage
                                 && (uEFlags & X86_EFL_ZF));
                        uSrc1AddrReg += cbIncr * off;
                        uSrc2AddrReg += cbIncr * off;
                        uCounterReg  -= off;
                    }

                    /* Update the registers before looping. */
                    pCtx->ADDR_rCX = uCounterReg;
                    pCtx->ADDR_rSI = uSrc1AddrReg;
                    pCtx->ADDR_rDI = uSrc2AddrReg;
                    pCtx->eflags.u = uEFlags;

                    iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
                    iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
                    continue;
                }
                iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uValue1;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            OP_TYPE uValue2;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);

            pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
            pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            pCtx->eflags.u = uEFlags;
            cLeftPage--;
        } while (   (int32_t)cLeftPage > 0
                 && (uEFlags & X86_EFL_ZF));
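        /* Note: the signed cast above makes the fallback run exactly once
           when we entered with cLeftPage == 0 (the page crossing case): the
           decrement wraps cLeftPage to UINT32_MAX, which is negative as
           int32_t. */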
    } while (   uCounterReg != 0
             && (uEFlags & X86_EFL_ZF));

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Implements 'REPNE CMPS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
    uint64_t        uSrc1Base;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint64_t uSrc2Base;
    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const    cbIncr       = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE       uSrc1AddrReg = pCtx->ADDR_rSI;
    ADDR_TYPE       uSrc2AddrReg = pCtx->ADDR_rDI;
    uint32_t        uEFlags      = pCtx->eflags.u;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE  uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
        ADDR2_TYPE  uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
        uint32_t    cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftSrc1Page > uCounterReg)
            cLeftSrc1Page = uCounterReg;
        uint32_t    cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        uint32_t    cLeftPage     = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);

        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Optimize reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uSrc1AddrReg < pSrc1Hid->u32Limit
                    && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
                    && uSrc2AddrReg < pCtx->es.u32Limit
                    && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysSrc1Mem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            RTGCPHYS GCPhysSrc2Mem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            OP_TYPE const *puSrc2Mem;
            PGMPAGEMAPLOCK PgLockSrc2Mem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
            if (rcStrict == VINF_SUCCESS)
            {
                OP_TYPE const *puSrc1Mem;
                PGMPAGEMAPLOCK PgLockSrc1Mem;
                rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
                if (rcStrict == VINF_SUCCESS)
                {
                    if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
                    {
                        /* The buffers differ - assume no item matched and only
                           compare the last one to get the right eflags. */
                        RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
                        uSrc1AddrReg += cLeftPage * cbIncr;
                        uSrc2AddrReg += cLeftPage * cbIncr;
                        uCounterReg  -= cLeftPage;
                    }
                    else
                    {
                        /* The buffers are identical, so at least the first item
                           matches; compare each item (and keep volatile memory
                           in mind). */
                        uint32_t off = 0;
                        do
                        {
                            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
                            off++;
                        } while (   off < cLeftPage
                                 && !(uEFlags & X86_EFL_ZF));
                        uSrc1AddrReg += cbIncr * off;
                        uSrc2AddrReg += cbIncr * off;
                        uCounterReg  -= off;
                    }

                    /* Update the registers before looping. */
                    pCtx->ADDR_rCX = uCounterReg;
                    pCtx->ADDR_rSI = uSrc1AddrReg;
                    pCtx->ADDR_rDI = uSrc2AddrReg;
                    pCtx->eflags.u = uEFlags;

                    iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
                    iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
                    continue;
                }
                iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uValue1;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            OP_TYPE uValue2;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);

            pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
            pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            pCtx->eflags.u = uEFlags;
            cLeftPage--;
        } while (   (int32_t)cLeftPage > 0
                 && !(uEFlags & X86_EFL_ZF));
    } while (   uCounterReg != 0
             && !(uEFlags & X86_EFL_ZF));

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Implements 'REPE SCAS'.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    uint64_t uBaseAddr;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const    cbIncr    = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    OP_TYPE const   uValueReg = pCtx->OP_rAX;
    ADDR_TYPE       uAddrReg  = pCtx->ADDR_rDI;
    uint32_t        uEFlags   = pCtx->eflags.u;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pCtx->es.u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Search till we find a mismatching item. */
                OP_TYPE     uTmpValue;
                bool        fQuit;
                uint32_t    i = 0;
                do
                {
                    uTmpValue = puMem[i++];
                    fQuit = uTmpValue != uValueReg;
                } while (i < cLeftPage && !fQuit);
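                /* The scan above deliberately uses plain compares instead of
                   calling the iemAImpl_cmp helper per item; EFLAGS only need
                   to reflect the final comparison, which is redone below. */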

                /* Update the regs. */
                RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
                pCtx->ADDR_rCX = uCounterReg -= i;
                pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
                pCtx->eflags.u = uEFlags;
                Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
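                /* REPE quits on the first non-matching item, i.e. exactly when
                   the final compare left ZF clear; the REPNE variant below
                   uses the inverted test. */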
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
                if (fQuit)
                    break;


                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uTmpValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);

            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            pCtx->eflags.u = uEFlags;
            cLeftPage--;
        } while (   (int32_t)cLeftPage > 0
                 && (uEFlags & X86_EFL_ZF));
    } while (   uCounterReg != 0
             && (uEFlags & X86_EFL_ZF));

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Implements 'REPNE SCAS'.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    uint64_t uBaseAddr;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const    cbIncr    = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    OP_TYPE const   uValueReg = pCtx->OP_rAX;
    ADDR_TYPE       uAddrReg  = pCtx->ADDR_rDI;
    uint32_t        uEFlags   = pCtx->eflags.u;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pCtx->es.u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Search till we find a matching item. */
                OP_TYPE     uTmpValue;
                bool        fQuit;
                uint32_t    i = 0;
                do
                {
                    uTmpValue = puMem[i++];
                    fQuit = uTmpValue == uValueReg;
                } while (i < cLeftPage && !fQuit);

                /* Update the regs. */
                RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
                pCtx->ADDR_rCX = uCounterReg -= i;
                pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
                pCtx->eflags.u = uEFlags;
                Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
                if (fQuit)
                    break;


                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uTmpValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            pCtx->eflags.u = uEFlags;
            cLeftPage--;
        } while (   (int32_t)cLeftPage > 0
                 && !(uEFlags & X86_EFL_ZF));
    } while (   uCounterReg != 0
             && !(uEFlags & X86_EFL_ZF));

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}




/**
 * Implements 'REP MOVS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
    uint64_t        uSrcBase;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uSrcBase);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint64_t uDstBase;
    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uDstBase);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const    cbIncr      = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE       uSrcAddrReg = pCtx->ADDR_rSI;
    ADDR_TYPE       uDstAddrReg = pCtx->ADDR_rDI;

    /*
     * Be careful with handle bypassing.
     */
    if (pIemCpu->fBypassHandlers)
    {
        Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
    }

    /*
     * If we're reading back what we write, we have to let the verification
     * code know about it, to prevent a false positive.
     * Note! This doesn't take aliasing or wrapping into account - lazy bird.
     */
#ifdef IEM_VERIFICATION_MODE_FULL
    if (   IEM_VERIFICATION_ENABLED(pIemCpu)
        && (cbIncr > 0
            ?    uSrcAddrReg <= uDstAddrReg
              && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
            :    uDstAddrReg <= uSrcAddrReg
              && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
        pIemCpu->fOverlappingMovs = true;
#endif
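    /* Overlap example: cbIncr == +4, uSrcAddrReg == 0x1000, uDstAddrReg ==
       0x1008 and uCounterReg == 4 give a source range [0x1000..0x1010) that
       covers the destination start, i.e. the copy reads back what it wrote. */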

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE  uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
        ADDR2_TYPE  uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
        uint32_t    cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftSrcPage > uCounterReg)
            cLeftSrcPage = uCounterReg;
        uint32_t    cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        uint32_t    cLeftPage    = RT_MIN(cLeftSrcPage, cLeftDstPage);

        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uSrcAddrReg < pSrcHid->u32Limit
                    && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
                    && uDstAddrReg < pCtx->es.u32Limit
                    && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysSrcMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            RTGCPHYS GCPhysDstMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockDstMem;
            OP_TYPE *puDstMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
            if (rcStrict == VINF_SUCCESS)
            {
                PGMPAGEMAPLOCK PgLockSrcMem;
                OP_TYPE const *puSrcMem;
                rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
                if (rcStrict == VINF_SUCCESS)
                {
                    Assert(   (GCPhysSrcMem        >> PAGE_SHIFT) != (GCPhysDstMem        >> PAGE_SHIFT)
                           || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));

                    /* Perform the operation exactly (don't use memcpy to avoid
                       having to consider how its implementation would affect
                       any overlapping source and destination area). */
                    OP_TYPE const  *puSrcCur = puSrcMem;
                    OP_TYPE        *puDstCur = puDstMem;
                    uint32_t        cTodo    = cLeftPage;
                    while (cTodo-- > 0)
                        *puDstCur++ = *puSrcCur++;

                    /* Update the registers. */
                    pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
                    pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
                    pCtx->ADDR_rCX = uCounterReg -= cLeftPage;

                    iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
                    iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
                    continue;
                }
                iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
            pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            cLeftPage--;
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Implements 'REP STOS'.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    uint64_t uBaseAddr;
    VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const    cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    OP_TYPE const   uValue   = pCtx->OP_rAX;
    ADDR_TYPE       uAddrReg = pCtx->ADDR_rDI;

    /*
     * Be careful with handle bypassing.
     */
    /** @todo Permit doing a page if correctly aligned. */
    if (pIemCpu->fBypassHandlers)
    {
        Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
    }

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pCtx->es.u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Update the regs first so we can loop on cLeftPage. */
                pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
                pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;

                /* Do the memsetting. */
#if OP_SIZE == 8
                memset(puMem, uValue, cLeftPage);
/*#elif OP_SIZE == 32
                ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
#else
                while (cLeftPage-- > 0)
                    *puMem++ = uValue;
#endif
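                /* memset() only works for the byte variant since wider fill
                   patterns aren't a repeated single byte; hence the explicit
                   store loop for the other sizes.  The commented-out
                   ASMMemFill32 above hints at a possible dword fast path. */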

                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            cLeftPage--;
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Implements 'REP LODS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
    uint64_t        uBaseAddr;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE    uAddrReg = pCtx->ADDR_rSI;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pSrcHid->u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, we can get away with
             * just reading the last value on the page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Only get the last item, the rest doesn't matter in direct access mode. */
#if OP_SIZE == 32
                pCtx->rax = puMem[cLeftPage - 1];
#else
                pCtx->OP_rAX = puMem[cLeftPage - 1];
#endif
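                /* Architecturally every LODS iteration overwrites rAX, so with
                   the whole page safely mapped only the final load is
                   observable.  (The OP_SIZE == 32 case assigns to rax so the
                   upper half is zero-extended, as a real 32-bit load does.) */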
                pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
                pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uTmpValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
#if OP_SIZE == 32
            pCtx->rax = uTmpValue;
#else
            pCtx->OP_rAX = uTmpValue;
#endif
            pCtx->ADDR_rSI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            cLeftPage--;
        } while ((int32_t)cLeftPage > 0);
        if (rcStrict != VINF_SUCCESS)
            break;
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}


#if OP_SIZE != 64

/**
 * Implements 'INS' (no rep)
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
{
    PVM             pVM  = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC    rcStrict;

    /*
     * Be careful with handle bypassing.
     */
    if (pIemCpu->fBypassHandlers)
    {
        Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
    }

    /*
     * ASSUMES the #GP for I/O permission is taken first, then any #GP for
     * segmentation and finally any #PF due to virtual address translation.
     * ASSUMES nothing is read from the I/O port before traps are taken.
     */
    if (!fIoChecked)
    {
        rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    OP_TYPE *puMem;
    rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint32_t u32Value = 0;
    if (!IEM_VERIFICATION_ENABLED(pIemCpu))
        rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8);
    else
        rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
    if (IOM_SUCCESS(rcStrict))
    {
        *puMem = (OP_TYPE)u32Value; /* store the port value before committing (was missing in the listing; cf. the rep version below). */
        VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
        if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
        {
            if (!pCtx->eflags.Bits.u1DF)
                pCtx->ADDR_rDI += OP_SIZE / 8;
            else
                pCtx->ADDR_rDI -= OP_SIZE / 8;
            iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        }
        /* iemMemMap already checked permissions, so this may only be real
           errors or access handler meddling.  The access handler case is
           going to cause misbehavior if the instruction is re-interpreted
           or something, so we fail with an internal error here instead. */
        else
            AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), VERR_IEM_IPE_1);
    }
    return rcStrict;
}


/**
 * Implements 'REP INS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
{
    PVM         pVM   = IEMCPU_TO_VM(pIemCpu);
    PVMCPU      pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
    PCPUMCTX    pCtx  = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    uint16_t const  u16Port = pCtx->dx;
    VBOXSTRICTRC    rcStrict;
    if (!fIoChecked)
    {
        rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    uint64_t uBaseAddr;
    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE    uAddrReg = pCtx->ADDR_rDI;

    /*
     * Be careful with handle bypassing.
     */
    if (pIemCpu->fBypassHandlers)
    {
        Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
    }

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pCtx->es.u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, we would've liked to use
             * a string I/O method to do the work, but the current IOM
             * interface doesn't match our current approach. So, do a regular
             * loop instead.
             */
            /** @todo Change the I/O manager interface to make use of
             *        mapped buffers instead of leaving those bits to the
             *        device implementation! */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                uint32_t off = 0;
                while (off < cLeftPage)
                {
                    uint32_t u32Value;
                    if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                        rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
                    else
                        rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
                    if (IOM_SUCCESS(rcStrict))
                    {
                        puMem[off] = (OP_TYPE)u32Value;
                        pCtx->ADDR_rDI = uAddrReg += cbIncr;
                        pCtx->ADDR_rCX = --uCounterReg;
                    }
                    if (rcStrict != VINF_SUCCESS)
                    {
                        if (IOM_SUCCESS(rcStrict))
                        {
                            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                            if (uCounterReg == 0)
                                iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
                        }
                        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
                        return rcStrict;
                    }
                    off++;
                }
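                /* Note: rDI/rCX were advanced per item above, so when an
                   informational IOM status forces an early return the guest
                   state stays architecturally consistent and the REP INS can
                   simply be restarted to do the remaining items. */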
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         *
         * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
         *       I/O port, otherwise it wouldn't really be restartable.
         */
        /** @todo investigate what the CPU actually does with \#PF/\#GP
         *        during INS. */
        do
        {
            OP_TYPE *puMem;
            rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            uint32_t u32Value = 0;
            if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
            else
                rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
            if (!IOM_SUCCESS(rcStrict))
                return rcStrict;

            *puMem = (OP_TYPE)u32Value;
            VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
            AssertLogRelMsgReturn(rcStrict2 == VINF_SUCCESS, ("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
                                  VERR_IEM_IPE_1); /* See non-rep version. */

            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;

            cLeftPage--;
            if (rcStrict != VINF_SUCCESS)
            {
                if (uCounterReg == 0)
                    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
                rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                return rcStrict;
            }
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Implements 'OUTS' (no rep)
 */
IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
{
    PVM             pVM  = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC    rcStrict;

    /*
     * ASSUMES the #GP for I/O permission is taken first, then any #GP for
     * segmentation and finally any #PF due to virtual address translation.
     * ASSUMES nothing is written to the I/O port before traps are taken.
     */
    if (!fIoChecked)
    {
        rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    OP_TYPE uValue;
    rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
    if (rcStrict == VINF_SUCCESS)
    {
        if (!IEM_VERIFICATION_ENABLED(pIemCpu))
            rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8);
        else
            rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
        if (IOM_SUCCESS(rcStrict))
        {
            if (!pCtx->eflags.Bits.u1DF)
                pCtx->ADDR_rSI += OP_SIZE / 8;
            else
                pCtx->ADDR_rSI -= OP_SIZE / 8;
            iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
            if (rcStrict != VINF_SUCCESS)
                rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
        }
    }
    return rcStrict;
}
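
/* A note on the IOM_SUCCESS() / iemSetPassUpStatus() pattern seen above and
   below: IOM may return an informational (VINF_*) status which counts as
   success for the emulated instruction but still has to reach the caller,
   e.g. to force a trip to ring-3.  The instruction is therefore completed
   first and the status is then passed up rather than being discarded. */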


/**
 * Implements 'REP OUTS'.
 */
IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
{
    PVM         pVM   = IEMCPU_TO_VM(pIemCpu);
    PVMCPU      pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
    PCPUMCTX    pCtx  = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    uint16_t const  u16Port = pCtx->dx;
    VBOXSTRICTRC    rcStrict;
    if (!fIoChecked)
    {
        rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
    uint64_t        uBaseAddr;
    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE    uAddrReg = pCtx->ADDR_rSI;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE  uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pHid->u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, we would've liked to use
             * a string I/O method to do the work, but the current IOM
             * interface doesn't match our current approach. So, do a regular
             * loop instead.
             */
            /** @todo Change the I/O manager interface to make use of
             *        mapped buffers instead of leaving those bits to the
             *        device implementation? */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                uint32_t off = 0;
                while (off < cLeftPage)
                {
                    uint32_t u32Value = *puMem++;
                    if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                        rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, OP_SIZE / 8);
                    else
                        rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
                    if (IOM_SUCCESS(rcStrict))
                    {
                        pCtx->ADDR_rSI = uAddrReg += cbIncr;
                        pCtx->ADDR_rCX = --uCounterReg;
                    }
                    if (rcStrict != VINF_SUCCESS)
                    {
                        if (IOM_SUCCESS(rcStrict))
                        {
                            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                            if (uCounterReg == 0)
                                iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
                        }
                        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
                        return rcStrict;
                    }
                    off++;
                }
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         *
         * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
         *       I/O port, otherwise it wouldn't really be restartable.
         */
        /** @todo investigate what the CPU actually does with \#PF/\#GP
         *        during OUTS. */
        do
        {
            OP_TYPE uValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
            else
                rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
            if (IOM_SUCCESS(rcStrict))
            {
                pCtx->ADDR_rSI = uAddrReg += cbIncr;
                pCtx->ADDR_rCX = --uCounterReg;
                cLeftPage--;
            }
            if (rcStrict != VINF_SUCCESS)
            {
                if (IOM_SUCCESS(rcStrict))
                {
                    if (uCounterReg == 0)
                        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
                    rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                }
                return rcStrict;
            }
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}

#endif /* OP_SIZE != 64 */
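
/* Note: no INS/OUTS variants are instantiated for OP_SIZE == 64 above since
   x86 I/O port accesses are at most 32 bits wide. */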


#undef OP_rAX
#undef OP_SIZE
#undef ADDR_SIZE
#undef ADDR_rDI
#undef ADDR_rSI
#undef ADDR_rCX
#undef ADDR_rIP
#undef ADDR2_TYPE
#undef ADDR_TYPE
#undef ADDR2_TYPE
#undef IS_64_BIT_CODE