VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@ 42705

Last change on this file since 42705 was 42633, checked in by vboxsync, 12 years ago

IEM: Implemented CMPXCHG8B. Fixed PGMPhysIemGCPhys2Ptr so that it doesn't return informational status returns, only VINF_SUCCESS and errors.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 50.7 KB
1/* $Id: IEMAllCImplStrInstr.cpp.h 42633 2012-08-06 17:22:56Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50#else
51# error "Bad ADDR_SIZE."
52#endif
53#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
54
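/*
 * Illustrative note (an assumption, not taken from the original build glue):
 * this template is expected to be instantiated by defining OP_SIZE and
 * ADDR_SIZE and including the file once per operand/address size combination,
 * roughly like the example-only block below.  The IEM_CIMPL_DEF_0/1 macros are
 * likewise assumed to expand to function definitions that also receive the
 * per-CPU IEM state and the instruction length, which is why pIemCpu and
 * cbInstr are used in the bodies below without local declarations.
 */
#if 0 /* example only, never compiled */
# define OP_SIZE   16
# define ADDR_SIZE 32
# include "IEMAllCImplStrInstr.cpp.h"  /* would emit iemCImpl_repe_cmps_op16_addr32 and friends */
#endif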
55
56/**
57 * Implements 'REPE CMPS'.
58 */
59IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
60{
61 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
62
63 /*
64 * Setup.
65 */
66 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
67 if (uCounterReg == 0)
68 {
69 iemRegAddToRip(pIemCpu, cbInstr);
70 return VINF_SUCCESS;
71 }
72
73 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
74 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
75 if (rcStrict != VINF_SUCCESS)
76 return rcStrict;
77
78 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
79 if (rcStrict != VINF_SUCCESS)
80 return rcStrict;
81
82 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
83 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
84 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
85 uint32_t uEFlags = pCtx->eflags.u;
86
87 /*
88 * The loop.
89 */
90 do
91 {
92 /*
93 * Do segmentation and virtual page stuff.
94 */
95#if ADDR_SIZE != 64
96 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
97 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
98#else
99 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
100 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
101#endif
102 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
103 if (cLeftSrc1Page > uCounterReg)
104 cLeftSrc1Page = uCounterReg;
105 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
106 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
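 /* Worked example (assuming 4 KiB pages): with OP_SIZE=32 and a source address
    whose page offset is 0xff8, (PAGE_SIZE - 0xff8) / 4 = 2, so at most two
    items can be compared before the page boundary; the rCX count and the other
    operand's distance to its page end clamp this further via the RT_MIN above. */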
107
108 if ( cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
109 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
110#if ADDR_SIZE != 64
111 && uSrc1AddrReg < pSrc1Hid->u32Limit
112 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
113 && uSrc2AddrReg < pCtx->es.u32Limit
114 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
115#endif
116 )
117 {
118 RTGCPHYS GCPhysSrc1Mem;
119 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
120 if (rcStrict != VINF_SUCCESS)
121 return rcStrict;
122
123 RTGCPHYS GCPhysSrc2Mem;
124 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
125 if (rcStrict != VINF_SUCCESS)
126 return rcStrict;
127
128 /*
129 * If we can map the page without trouble, do block processing
130 * until the end of the current page.
131 */
132 PGMPAGEMAPLOCK PgLockSrc2Mem;
133 OP_TYPE const *puSrc2Mem;
134 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
135 if (rcStrict == VINF_SUCCESS)
136 {
137 PGMPAGEMAPLOCK PgLockSrc1Mem;
138 OP_TYPE const *puSrc1Mem;
139 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
140 if (rcStrict == VINF_SUCCESS)
141 {
142 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
143 {
144 /* All matches, only compare the last item to get the right eflags. */
145 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
146 uSrc1AddrReg += cLeftPage * cbIncr;
147 uSrc2AddrReg += cLeftPage * cbIncr;
148 uCounterReg -= cLeftPage;
149 }
150 else
151 {
152 /* Some mismatch, compare each item (and keep volatile
153 memory in mind). */
154 uint32_t off = 0;
155 do
156 {
157 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
158 off++;
159 } while ( off < cLeftPage
160 && (uEFlags & X86_EFL_ZF));
161 uSrc1AddrReg += cbIncr * off;
162 uSrc2AddrReg += cbIncr * off;
163 uCounterReg -= off;
164 }
165
166 /* Update the registers before looping. */
167 pCtx->ADDR_rCX = uCounterReg;
168 pCtx->ADDR_rSI = uSrc1AddrReg;
169 pCtx->ADDR_rDI = uSrc2AddrReg;
170 pCtx->eflags.u = uEFlags;
171
172 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
173 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
174 continue;
175 }
176 }
177 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
178 }
179
180 /*
181 * Fallback - slow processing till the end of the current page.
182 * In the cross page boundary case we will end up here with cLeftPage
183 * as 0, so we execute a single iteration of the loop.
184 */
185 do
186 {
187 OP_TYPE uValue1;
188 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
189 if (rcStrict != VINF_SUCCESS)
190 return rcStrict;
191 OP_TYPE uValue2;
192 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
193 if (rcStrict != VINF_SUCCESS)
194 return rcStrict;
195 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
196
197 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
198 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
199 pCtx->ADDR_rCX = --uCounterReg;
200 pCtx->eflags.u = uEFlags;
201 cLeftPage--;
202 } while ( (int32_t)cLeftPage > 0
203 && (uEFlags & X86_EFL_ZF));
204 } while ( uCounterReg != 0
205 && (uEFlags & X86_EFL_ZF));
206
207 /*
208 * Done.
209 */
210 iemRegAddToRip(pIemCpu, cbInstr);
211 return VINF_SUCCESS;
212}
213
214
215/**
216 * Implements 'REPNE CMPS'.
217 */
218IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
219{
220 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
221
222 /*
223 * Setup.
224 */
225 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
226 if (uCounterReg == 0)
227 {
228 iemRegAddToRip(pIemCpu, cbInstr);
229 return VINF_SUCCESS;
230 }
231
232 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
233 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
234 if (rcStrict != VINF_SUCCESS)
235 return rcStrict;
236
237 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
238 if (rcStrict != VINF_SUCCESS)
239 return rcStrict;
240
241 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
242 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
243 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
244 uint32_t uEFlags = pCtx->eflags.u;
245
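 /* Note: the only semantic difference from the REPE variant above is the loop
    condition - REPNE keeps iterating while ZF is clear (the operands differ),
    so every X86_EFL_ZF test below is negated relative to the REPE code. */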
246 /*
247 * The loop.
248 */
249 do
250 {
251 /*
252 * Do segmentation and virtual page stuff.
253 */
254#if ADDR_SIZE != 64
255 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
256 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
257#else
258 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
259 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
260#endif
261 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
262 if (cLeftSrc1Page > uCounterReg)
263 cLeftSrc1Page = uCounterReg;
264 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
265 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
266
267 if ( cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
268 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
269#if ADDR_SIZE != 64
270 && uSrc1AddrReg < pSrc1Hid->u32Limit
271 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
272 && uSrc2AddrReg < pCtx->es.u32Limit
273 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
274#endif
275 )
276 {
277 RTGCPHYS GCPhysSrc1Mem;
278 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
279 if (rcStrict != VINF_SUCCESS)
280 return rcStrict;
281
282 RTGCPHYS GCPhysSrc2Mem;
283 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
284 if (rcStrict != VINF_SUCCESS)
285 return rcStrict;
286
287 /*
288 * If we can map the page without trouble, do block processing
289 * until the end of the current page.
290 */
291 OP_TYPE const *puSrc2Mem;
292 PGMPAGEMAPLOCK PgLockSrc2Mem;
293 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
294 if (rcStrict == VINF_SUCCESS)
295 {
296 OP_TYPE const *puSrc1Mem;
297 PGMPAGEMAPLOCK PgLockSrc1Mem;
298 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
299 if (rcStrict == VINF_SUCCESS)
300 {
301 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
302 {
303 /* All of them mismatch, only compare the last item to get the right eflags. */
304 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
305 uSrc1AddrReg += cLeftPage * cbIncr;
306 uSrc2AddrReg += cLeftPage * cbIncr;
307 uCounterReg -= cLeftPage;
308 }
309 else
310 {
311 /* The blocks compare equal, so the very first comparison sets ZF; compare
312 each item anyway (and keep volatile memory in mind). */
313 uint32_t off = 0;
314 do
315 {
316 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
317 off++;
318 } while ( off < cLeftPage
319 && !(uEFlags & X86_EFL_ZF));
320 uSrc1AddrReg += cbIncr * off;
321 uSrc2AddrReg += cbIncr * off;
322 uCounterReg -= off;
323 }
324
325 /* Update the registers before looping. */
326 pCtx->ADDR_rCX = uCounterReg;
327 pCtx->ADDR_rSI = uSrc1AddrReg;
328 pCtx->ADDR_rDI = uSrc2AddrReg;
329 pCtx->eflags.u = uEFlags;
330
331 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
332 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
333 continue;
334 }
335 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
336 }
337 }
338
339 /*
340 * Fallback - slow processing till the end of the current page.
341 * In the cross page boundary case we will end up here with cLeftPage
342 * as 0, so we execute a single iteration of the loop.
343 */
344 do
345 {
346 OP_TYPE uValue1;
347 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
348 if (rcStrict != VINF_SUCCESS)
349 return rcStrict;
350 OP_TYPE uValue2;
351 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
352 if (rcStrict != VINF_SUCCESS)
353 return rcStrict;
354 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
355
356 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
357 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
358 pCtx->ADDR_rCX = --uCounterReg;
359 pCtx->eflags.u = uEFlags;
360 cLeftPage--;
361 } while ( (int32_t)cLeftPage > 0
362 && !(uEFlags & X86_EFL_ZF));
363 } while ( uCounterReg != 0
364 && !(uEFlags & X86_EFL_ZF));
365
366 /*
367 * Done.
368 */
369 iemRegAddToRip(pIemCpu, cbInstr);
370 return VINF_SUCCESS;
371}
372
373
374/**
375 * Implements 'REPE SCAS'.
376 */
377IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
378{
379 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
380
381 /*
382 * Setup.
383 */
384 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
385 if (uCounterReg == 0)
386 {
387 iemRegAddToRip(pIemCpu, cbInstr);
388 return VINF_SUCCESS;
389 }
390
391 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
392 if (rcStrict != VINF_SUCCESS)
393 return rcStrict;
394
395 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
396 OP_TYPE const uValueReg = pCtx->OP_rAX;
397 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
398 uint32_t uEFlags = pCtx->eflags.u;
399
400 /*
401 * The loop.
402 */
403 do
404 {
405 /*
406 * Do segmentation and virtual page stuff.
407 */
408#if ADDR_SIZE != 64
409 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
410#else
411 uint64_t uVirtAddr = uAddrReg;
412#endif
413 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
414 if (cLeftPage > uCounterReg)
415 cLeftPage = uCounterReg;
416 if ( cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
417 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
418#if ADDR_SIZE != 64
419 && uAddrReg < pCtx->es.u32Limit
420 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
421#endif
422 )
423 {
424 RTGCPHYS GCPhysMem;
425 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
426 if (rcStrict != VINF_SUCCESS)
427 return rcStrict;
428
429 /*
430 * If we can map the page without trouble, do block processing
431 * until the end of the current page.
432 */
433 PGMPAGEMAPLOCK PgLockMem;
434 OP_TYPE const *puMem;
435 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 /* Search till we find a mismatching item. */
439 OP_TYPE uTmpValue;
440 bool fQuit;
441 uint32_t i = 0;
442 do
443 {
444 uTmpValue = puMem[i++];
445 fQuit = uTmpValue != uValueReg;
446 } while (i < cLeftPage && !fQuit);
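 /* Only the flags of the final comparison are architecturally visible, so the
    scan above is a plain equality test and the cmp helper is invoked just
    once, on the last item read. */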
447
448 /* Update the regs. */
449 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
450 pCtx->ADDR_rCX = uCounterReg -= i;
451 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
452 pCtx->eflags.u = uEFlags;
453 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
454 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
455 if (fQuit)
456 break;
457
458
459 /* If unaligned, we drop thru and do the page crossing access
460 below. Otherwise, do the next page. */
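 /* (If the address is suitably aligned, the block above ended exactly at the
    page boundary and the next round can start on the following page; otherwise
    the next item may straddle two pages and has to go through the fallback
    path below.) */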
461 if (!(uVirtAddr & (OP_SIZE - 1)))
462 continue;
463 if (uCounterReg == 0)
464 break;
465 cLeftPage = 0;
466 }
467 }
468
469 /*
470 * Fallback - slow processing till the end of the current page.
471 * In the cross page boundary case we will end up here with cLeftPage
472 * as 0, so we execute a single iteration of the loop.
473 */
474 do
475 {
476 OP_TYPE uTmpValue;
477 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
478 if (rcStrict != VINF_SUCCESS)
479 return rcStrict;
480 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
481
482 pCtx->ADDR_rDI = uAddrReg += cbIncr;
483 pCtx->ADDR_rCX = --uCounterReg;
484 pCtx->eflags.u = uEFlags;
485 cLeftPage--;
486 } while ( (int32_t)cLeftPage > 0
487 && (uEFlags & X86_EFL_ZF));
488 } while ( uCounterReg != 0
489 && (uEFlags & X86_EFL_ZF));
490
491 /*
492 * Done.
493 */
494 iemRegAddToRip(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements 'REPNE SCAS'.
501 */
502IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
503{
504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
505
506 /*
507 * Setup.
508 */
509 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
510 if (uCounterReg == 0)
511 {
512 iemRegAddToRip(pIemCpu, cbInstr);
513 return VINF_SUCCESS;
514 }
515
516 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
517 if (rcStrict != VINF_SUCCESS)
518 return rcStrict;
519
520 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
521 OP_TYPE const uValueReg = pCtx->OP_rAX;
522 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
523 uint32_t uEFlags = pCtx->eflags.u;
524
525 /*
526 * The loop.
527 */
528 do
529 {
530 /*
531 * Do segmentation and virtual page stuff.
532 */
533#if ADDR_SIZE != 64
534 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
535#else
536 uint64_t uVirtAddr = uAddrReg;
537#endif
538 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
539 if (cLeftPage > uCounterReg)
540 cLeftPage = uCounterReg;
541 if ( cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
542 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
543#if ADDR_SIZE != 64
544 && uAddrReg < pCtx->es.u32Limit
545 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
546#endif
547 )
548 {
549 RTGCPHYS GCPhysMem;
550 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
551 if (rcStrict != VINF_SUCCESS)
552 return rcStrict;
553
554 /*
555 * If we can map the page without trouble, do block processing
556 * until the end of the current page.
557 */
558 PGMPAGEMAPLOCK PgLockMem;
559 OP_TYPE const *puMem;
560 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
561 if (rcStrict == VINF_SUCCESS)
562 {
563 /* Search till we find a matching item. */
564 OP_TYPE uTmpValue;
565 bool fQuit;
566 uint32_t i = 0;
567 do
568 {
569 uTmpValue = puMem[i++];
570 fQuit = uTmpValue == uValueReg;
571 } while (i < cLeftPage && !fQuit);
572
573 /* Update the regs. */
574 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
575 pCtx->ADDR_rCX = uCounterReg -= i;
576 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
577 pCtx->eflags.u = uEFlags;
578 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
579 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
580 if (fQuit)
581 break;
582
583
584 /* If unaligned, we drop thru and do the page crossing access
585 below. Otherwise, do the next page. */
586 if (!(uVirtAddr & (OP_SIZE - 1)))
587 continue;
588 if (uCounterReg == 0)
589 break;
590 cLeftPage = 0;
591 }
592 }
593
594 /*
595 * Fallback - slow processing till the end of the current page.
596 * In the cross page boundary case we will end up here with cLeftPage
597 * as 0, so we execute a single iteration of the loop.
598 */
599 do
600 {
601 OP_TYPE uTmpValue;
602 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
603 if (rcStrict != VINF_SUCCESS)
604 return rcStrict;
605 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
606 pCtx->ADDR_rDI = uAddrReg += cbIncr;
607 pCtx->ADDR_rCX = --uCounterReg;
608 pCtx->eflags.u = uEFlags;
609 cLeftPage--;
610 } while ( (int32_t)cLeftPage > 0
611 && !(uEFlags & X86_EFL_ZF));
612 } while ( uCounterReg != 0
613 && !(uEFlags & X86_EFL_ZF));
614
615 /*
616 * Done.
617 */
618 iemRegAddToRip(pIemCpu, cbInstr);
619 return VINF_SUCCESS;
620}
621
622
623
624
625/**
626 * Implements 'REP MOVS'.
627 */
628IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
629{
630 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
631
632 /*
633 * Setup.
634 */
635 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
636 if (uCounterReg == 0)
637 {
638 iemRegAddToRip(pIemCpu, cbInstr);
639 return VINF_SUCCESS;
640 }
641
642 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
643 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
644 if (rcStrict != VINF_SUCCESS)
645 return rcStrict;
646
647 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
648 if (rcStrict != VINF_SUCCESS)
649 return rcStrict;
650
651 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
652 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
653 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
654
655 /*
656 * If we're reading back what we write, we have to let the verification code
657 * know about it to prevent a false positive.
658 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
659 */
660#ifdef IEM_VERIFICATION_MODE
661 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
662 && (cbIncr > 0
663 ? uSrcAddrReg <= uDstAddrReg
664 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
665 : uDstAddrReg <= uSrcAddrReg
666 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
667 pIemCpu->fOverlappingMovs = true;
668#endif
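 /* Example (illustration only): a forward copy with rSI=0x1000, rDI=0x1008,
    rCX=16 and OP_SIZE=32 reads the range [0x1000,0x1040), which covers rDI,
    so fOverlappingMovs is set and the verification code elsewhere can allow
    for the read-back differences. */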
669
670 /*
671 * The loop.
672 */
673 do
674 {
675 /*
676 * Do segmentation and virtual page stuff.
677 */
678#if ADDR_SIZE != 64
679 ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
680 ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->es.u64Base + uDstAddrReg;
681#else
682 uint64_t uVirtSrcAddr = uSrcAddrReg;
683 uint64_t uVirtDstAddr = uDstAddrReg;
684#endif
685 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
686 if (cLeftSrcPage > uCounterReg)
687 cLeftSrcPage = uCounterReg;
688 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
689 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
690
691 if ( cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
692 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
693#if ADDR_SIZE != 64
694 && uSrcAddrReg < pSrcHid->u32Limit
695 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
696 && uDstAddrReg < pCtx->es.u32Limit
697 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
698#endif
699 )
700 {
701 RTGCPHYS GCPhysSrcMem;
702 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
703 if (rcStrict != VINF_SUCCESS)
704 return rcStrict;
705
706 RTGCPHYS GCPhysDstMem;
707 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
708 if (rcStrict != VINF_SUCCESS)
709 return rcStrict;
710
711 /*
712 * If we can map the page without trouble, do block processing
713 * until the end of the current page.
714 */
715 PGMPAGEMAPLOCK PgLockDstMem;
716 OP_TYPE *puDstMem;
717 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
718 if (rcStrict == VINF_SUCCESS)
719 {
720 PGMPAGEMAPLOCK PgLockSrcMem;
721 OP_TYPE const *puSrcMem;
722 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
723 if (rcStrict == VINF_SUCCESS)
724 {
725 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
726 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
727
728 /* Perform the operation exactly (don't use memcpy to avoid
729 having to consider how its implementation would affect
730 any overlapping source and destination area). */
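 /* For instance, a forward 'rep movsb' with rDI = rSI + 1 must replicate the
    original first byte across the whole destination range, a result that
    memcpy()/memmove() would not reproduce. */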
731 OP_TYPE const *puSrcCur = puSrcMem;
732 OP_TYPE *puDstCur = puDstMem;
733 uint32_t cTodo = cLeftPage;
734 while (cTodo-- > 0)
735 *puDstCur++ = *puSrcCur++;
736
737 /* Update the registers. */
738 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
739 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
740 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
741
742 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
743 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
744 continue;
745 }
746 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
747 }
748 }
749
750 /*
751 * Fallback - slow processing till the end of the current page.
752 * In the cross page boundary case we will end up here with cLeftPage
753 * as 0, so we execute a single iteration of the loop.
754 */
755 do
756 {
757 OP_TYPE uValue;
758 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
759 if (rcStrict != VINF_SUCCESS)
760 return rcStrict;
761 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
762 if (rcStrict != VINF_SUCCESS)
763 return rcStrict;
764
765 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
766 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
767 pCtx->ADDR_rCX = --uCounterReg;
768 cLeftPage--;
769 } while ((int32_t)cLeftPage > 0);
770 } while (uCounterReg != 0);
771
772 /*
773 * Done.
774 */
775 iemRegAddToRip(pIemCpu, cbInstr);
776 return VINF_SUCCESS;
777}
778
779
780/**
781 * Implements 'REP STOS'.
782 */
783IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
784{
785 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
786
787 /*
788 * Setup.
789 */
790 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
791 if (uCounterReg == 0)
792 {
793 iemRegAddToRip(pIemCpu, cbInstr);
794 return VINF_SUCCESS;
795 }
796
797 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
798 if (rcStrict != VINF_SUCCESS)
799 return rcStrict;
800
801 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
802 OP_TYPE const uValue = pCtx->OP_rAX;
803 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
804
805 /*
806 * The loop.
807 */
808 do
809 {
810 /*
811 * Do segmentation and virtual page stuff.
812 */
813#if ADDR_SIZE != 64
814 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
815#else
816 uint64_t uVirtAddr = uAddrReg;
817#endif
818 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
819 if (cLeftPage > uCounterReg)
820 cLeftPage = uCounterReg;
821 if ( cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
822 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
823#if ADDR_SIZE != 64
824 && uAddrReg < pCtx->es.u32Limit
825 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
826#endif
827 )
828 {
829 RTGCPHYS GCPhysMem;
830 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
831 if (rcStrict != VINF_SUCCESS)
832 return rcStrict;
833
834 /*
835 * If we can map the page without trouble, do block processing
836 * until the end of the current page.
837 */
838 PGMPAGEMAPLOCK PgLockMem;
839 OP_TYPE *puMem;
840 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
841 if (rcStrict == VINF_SUCCESS)
842 {
843 /* Update the regs first so we can loop on cLeftPage. */
844 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
845 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
846
847 /* Do the memsetting. */
848#if OP_SIZE == 8
849 memset(puMem, uValue, cLeftPage);
850/*#elif OP_SIZE == 32
851 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
852#else
853 while (cLeftPage-- > 0)
854 *puMem++ = uValue;
855#endif
856
857 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
858
859 /* If unaligned, we drop thru and do the page crossing access
860 below. Otherwise, do the next page. */
861 if (!(uVirtAddr & (OP_SIZE - 1)))
862 continue;
863 if (uCounterReg == 0)
864 break;
865 cLeftPage = 0;
866 }
867 }
868
869 /*
870 * Fallback - slow processing till the end of the current page.
871 * In the cross page boundary case we will end up here with cLeftPage
872 * as 0, so we execute a single iteration of the loop.
873 */
874 do
875 {
876 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
877 if (rcStrict != VINF_SUCCESS)
878 return rcStrict;
879 pCtx->ADDR_rDI = uAddrReg += cbIncr;
880 pCtx->ADDR_rCX = --uCounterReg;
881 cLeftPage--;
882 } while ((int32_t)cLeftPage > 0);
883 } while (uCounterReg != 0);
884
885 /*
886 * Done.
887 */
888 iemRegAddToRip(pIemCpu, cbInstr);
889 return VINF_SUCCESS;
890}
891
892
893/**
894 * Implements 'REP LODS'.
895 */
896IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
897{
898 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
899
900 /*
901 * Setup.
902 */
903 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
904 if (uCounterReg == 0)
905 {
906 iemRegAddToRip(pIemCpu, cbInstr);
907 return VINF_SUCCESS;
908 }
909
910 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
911 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
912 if (rcStrict != VINF_SUCCESS)
913 return rcStrict;
914
915 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
916 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
917
918 /*
919 * The loop.
920 */
921 do
922 {
923 /*
924 * Do segmentation and virtual page stuff.
925 */
926#if ADDR_SIZE != 64
927 ADDR2_TYPE uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg;
928#else
929 uint64_t uVirtAddr = uAddrReg;
930#endif
931 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
932 if (cLeftPage > uCounterReg)
933 cLeftPage = uCounterReg;
934 if ( cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
935 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
936#if ADDR_SIZE != 64
937 && uAddrReg < pSrcHid->u32Limit
938 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
939#endif
940 )
941 {
942 RTGCPHYS GCPhysMem;
943 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
944 if (rcStrict != VINF_SUCCESS)
945 return rcStrict;
946
947 /*
948 * If we can map the page without trouble, we can get away with
949 * just reading the last value on the page.
950 */
951 PGMPAGEMAPLOCK PgLockMem;
952 OP_TYPE const *puMem;
953 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
954 if (rcStrict == VINF_SUCCESS)
955 {
956 /* Only fetch the last item, the rest doesn't matter in direct access mode. */
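 /* Note: for the 32-bit operand size the result is stored to rax rather than
    eax so that the upper half of the register is cleared, matching the
    implicit zero-extension of 32-bit destination writes in long mode. */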
957#if OP_SIZE == 32
958 pCtx->rax = puMem[cLeftPage - 1];
959#else
960 pCtx->OP_rAX = puMem[cLeftPage - 1];
961#endif
962 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
963 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
964 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
965
966 /* If unaligned, we drop thru and do the page crossing access
967 below. Otherwise, do the next page. */
968 if (!(uVirtAddr & (OP_SIZE - 1)))
969 continue;
970 if (uCounterReg == 0)
971 break;
972 cLeftPage = 0;
973 }
974 }
975
976 /*
977 * Fallback - slow processing till the end of the current page.
978 * In the cross page boundary case we will end up here with cLeftPage
979 * as 0, so we execute a single iteration of the loop.
980 */
981 do
982 {
983 OP_TYPE uTmpValue;
984 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
985 if (rcStrict != VINF_SUCCESS)
986 return rcStrict;
987#if OP_SIZE == 32
988 pCtx->rax = uTmpValue;
989#else
990 pCtx->OP_rAX = uTmpValue;
991#endif
992 pCtx->ADDR_rSI = uAddrReg += cbIncr;
993 pCtx->ADDR_rCX = --uCounterReg;
994 cLeftPage--;
995 } while ((int32_t)cLeftPage > 0);
996 if (rcStrict != VINF_SUCCESS)
997 break;
998 } while (uCounterReg != 0);
999
1000 /*
1001 * Done.
1002 */
1003 iemRegAddToRip(pIemCpu, cbInstr);
1004 return VINF_SUCCESS;
1005}
1006
1007
1008#if OP_SIZE != 64
1009
1010/**
1011 * Implements 'INS' (no rep)
1012 */
1013IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1014{
1015 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1016 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1017 VBOXSTRICTRC rcStrict;
1018
1019 /*
1020 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1021 * segmentation and finally any #PF due to virtual address translation.
1022 * ASSUMES nothing is read from the I/O port before traps are taken.
1023 */
1024 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1025 if (rcStrict != VINF_SUCCESS)
1026 return rcStrict;
1027
1028 OP_TYPE *puMem;
1029 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1030 if (rcStrict != VINF_SUCCESS)
1031 return rcStrict;
1032
1033 uint32_t u32Value;
1034 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1035 rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8);
1036 else
1037 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1038 if (IOM_SUCCESS(rcStrict))
1039 {
1040 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1041 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1042 {
1043 if (!pCtx->eflags.Bits.u1DF)
1044 pCtx->ADDR_rDI += OP_SIZE / 8;
1045 else
1046 pCtx->ADDR_rDI -= OP_SIZE / 8;
1047 iemRegAddToRip(pIemCpu, cbInstr);
1048 }
1049 /* iemMemMap already checked permissions, so this may only be real errors
1050 or access handlers meddling. The access handler case is going to
1051 cause misbehavior if the instruction is re-interpreted or something similar.
1052 So, we fail with an internal error here instead. */
1053 else
1054 AssertLogRelFailedReturn(VERR_IEM_IPE_1);
1055 }
1056 return rcStrict;
1057}
1058
1059
1060/**
1061 * Implements 'REP INS'.
1062 */
1063IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1064{
1065 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1066 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1067
1068 /*
1069 * Setup.
1070 */
1071 uint16_t const u16Port = pCtx->dx;
1072 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1073 if (rcStrict != VINF_SUCCESS)
1074 return rcStrict;
1075
1076 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1077 if (uCounterReg == 0)
1078 {
1079 iemRegAddToRip(pIemCpu, cbInstr);
1080 return VINF_SUCCESS;
1081 }
1082
1083 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
1084 if (rcStrict != VINF_SUCCESS)
1085 return rcStrict;
1086
1087 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1088 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1089
1090 /*
1091 * The loop.
1092 */
1093 do
1094 {
1095 /*
1096 * Do segmentation and virtual page stuff.
1097 */
1098#if ADDR_SIZE != 64
1099 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
1100#else
1101 uint64_t uVirtAddr = uAddrReg;
1102#endif
1103 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1104 if (cLeftPage > uCounterReg)
1105 cLeftPage = uCounterReg;
1106 if ( cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
1107 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1108#if ADDR_SIZE != 64
1109 && uAddrReg < pCtx->es.u32Limit
1110 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
1111#endif
1112 )
1113 {
1114 RTGCPHYS GCPhysMem;
1115 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1116 if (rcStrict != VINF_SUCCESS)
1117 return rcStrict;
1118
1119 /*
1120 * If we can map the page without trouble, we would've liked to use
1121 * a string I/O method to do the work, but the current IOM
1122 * interface doesn't match our current approach. So, do a regular
1123 * loop instead.
1124 */
1125 /** @todo Change the I/O manager interface to make use of
1126 * mapped buffers instead of leaving those bits to the
1127 * device implementation? */
1128 PGMPAGEMAPLOCK PgLockMem;
1129 OP_TYPE *puMem;
1130 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1131 if (rcStrict == VINF_SUCCESS)
1132 {
1133 uint32_t off = 0;
1134 while (off < cLeftPage)
1135 {
1136 uint32_t u32Value;
1137 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1138 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1139 else
1140 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1141 if (IOM_SUCCESS(rcStrict))
1142 {
1143 puMem[off] = (OP_TYPE)u32Value;
1144 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1145 pCtx->ADDR_rCX = --uCounterReg;
1146 }
1147 if (rcStrict != VINF_SUCCESS)
1148 {
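 /* The port read returned a status that must be passed up. The instruction
    is restartable: rIP is only advanced once rCX has reached zero, so a
    re-execution simply performs the remaining iterations. */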
1149 if (IOM_SUCCESS(rcStrict))
1150 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1151 if (uCounterReg == 0)
1152 iemRegAddToRip(pIemCpu, cbInstr);
1153 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1154 return rcStrict;
1155 }
1156 off++;
1157 }
1158 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1159
1160 /* If unaligned, we drop thru and do the page crossing access
1161 below. Otherwise, do the next page. */
1162 if (!(uVirtAddr & (OP_SIZE - 1)))
1163 continue;
1164 if (uCounterReg == 0)
1165 break;
1166 cLeftPage = 0;
1167 }
1168 }
1169
1170 /*
1171 * Fallback - slow processing till the end of the current page.
1172 * In the cross page boundary case we will end up here with cLeftPage
1173 * as 0, so we execute a single iteration of the loop.
1174 *
1175 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1176 * I/O port, otherwise it wouldn't really be restartable.
1177 */
1178 /** @todo investigate what the CPU actually does with \#PF/\#GP
1179 * during INS. */
1180 do
1181 {
1182 OP_TYPE *puMem;
1183 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1184 if (rcStrict != VINF_SUCCESS)
1185 return rcStrict;
1186
1187 uint32_t u32Value;
1188 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1189 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1190 else
1191 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1192 if (!IOM_SUCCESS(rcStrict))
1193 return rcStrict;
1194
1195 *puMem = (OP_TYPE)u32Value;
1196 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1197 AssertLogRelReturn(rcStrict2 == VINF_SUCCESS, VERR_IEM_IPE_1); /* See non-rep version. */
1198
1199 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1200 pCtx->ADDR_rCX = --uCounterReg;
1201
1202 cLeftPage--;
1203 if (rcStrict != VINF_SUCCESS)
1204 {
1205 if (IOM_SUCCESS(rcStrict))
1206 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1207 if (uCounterReg == 0)
1208 iemRegAddToRip(pIemCpu, cbInstr);
1209 return rcStrict;
1210 }
1211 } while ((int32_t)cLeftPage > 0);
1212 } while (uCounterReg != 0);
1213
1214 /*
1215 * Done.
1216 */
1217 iemRegAddToRip(pIemCpu, cbInstr);
1218 return VINF_SUCCESS;
1219}
1220
1221
1222/**
1223 * Implements 'OUTS' (no rep)
1224 */
1225IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1226{
1227 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1228 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1229 VBOXSTRICTRC rcStrict;
1230
1231 /*
1232 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1233 * segmentation and finally any #PF due to virtual address translation.
1234 * ASSUMES nothing is read from the I/O port before traps are taken.
1235 */
1236 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1237 if (rcStrict != VINF_SUCCESS)
1238 return rcStrict;
1239
1240 OP_TYPE uValue;
1241 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1242 if (rcStrict == VINF_SUCCESS)
1243 {
1244 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1245 rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8);
1246 else
1247 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1248 if (IOM_SUCCESS(rcStrict))
1249 {
1250 if (!pCtx->eflags.Bits.u1DF)
1251 pCtx->ADDR_rSI += OP_SIZE / 8;
1252 else
1253 pCtx->ADDR_rSI -= OP_SIZE / 8;
1254 iemRegAddToRip(pIemCpu, cbInstr);
1255 if (rcStrict != VINF_SUCCESS)
1256 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1257 }
1258 }
1259 return rcStrict;
1260}
1261
1262
1263/**
1264 * Implements 'REP OUTS'.
1265 */
1266IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1267{
1268 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1269 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1270
1271 /*
1272 * Setup.
1273 */
1274 uint16_t const u16Port = pCtx->dx;
1275 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1276 if (rcStrict != VINF_SUCCESS)
1277 return rcStrict;
1278
1279 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1280 if (uCounterReg == 0)
1281 {
1282 iemRegAddToRip(pIemCpu, cbInstr);
1283 return VINF_SUCCESS;
1284 }
1285
1286 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1287 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg);
1288 if (rcStrict != VINF_SUCCESS)
1289 return rcStrict;
1290
1291 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1292 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1293
1294 /*
1295 * The loop.
1296 */
1297 do
1298 {
1299 /*
1300 * Do segmentation and virtual page stuff.
1301 */
1302#if ADDR_SIZE != 64
1303 ADDR2_TYPE uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg;
1304#else
1305 uint64_t uVirtAddr = uAddrReg;
1306#endif
1307 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1308 if (cLeftPage > uCounterReg)
1309 cLeftPage = uCounterReg;
1310 if ( cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
1311 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1312#if ADDR_SIZE != 64
1313 && uAddrReg < pHid->u32Limit
1314 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit
1315#endif
1316 )
1317 {
1318 RTGCPHYS GCPhysMem;
1319 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1320 if (rcStrict != VINF_SUCCESS)
1321 return rcStrict;
1322
1323 /*
1324 * If we can map the page without trouble, we would've liked to use
1325 * a string I/O method to do the work, but the current IOM
1326 * interface doesn't match our current approach. So, do a regular
1327 * loop instead.
1328 */
1329 /** @todo Change the I/O manager interface to make use of
1330 * mapped buffers instead of leaving those bits to the
1331 * device implementation? */
1332 PGMPAGEMAPLOCK PgLockMem;
1333 OP_TYPE const *puMem;
1334 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1335 if (rcStrict == VINF_SUCCESS)
1336 {
1337 uint32_t off = 0;
1338 while (off < cLeftPage)
1339 {
1340 uint32_t u32Value = *puMem++;
1341 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1342 rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8);
1343 else
1344 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
1345 if (IOM_SUCCESS(rcStrict))
1346 {
1347 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1348 pCtx->ADDR_rCX = --uCounterReg;
1349 }
1350 if (rcStrict != VINF_SUCCESS)
1351 {
1352 if (IOM_SUCCESS(rcStrict))
1353 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1354 if (uCounterReg == 0)
1355 iemRegAddToRip(pIemCpu, cbInstr);
1356 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1357 return rcStrict;
1358 }
1359 off++;
1360 }
1361 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1362
1363 /* If unaligned, we drop thru and do the page crossing access
1364 below. Otherwise, do the next page. */
1365 if (!(uVirtAddr & (OP_SIZE - 1)))
1366 continue;
1367 if (uCounterReg == 0)
1368 break;
1369 cLeftPage = 0;
1370 }
1371 }
1372
1373 /*
1374 * Fallback - slow processing till the end of the current page.
1375 * In the cross page boundary case we will end up here with cLeftPage
1376 * as 0, so we execute a single iteration of the loop.
1377 *
1378 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1379 * I/O port, otherwise it wouldn't really be restartable.
1380 */
1381 /** @todo investigate what the CPU actually does with \#PF/\#GP
1382 * during OUTS. */
1383 do
1384 {
1385 OP_TYPE uValue;
1386 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1387 if (rcStrict != VINF_SUCCESS)
1388 return rcStrict;
1389
1390 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1391 rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8);
1392 else
1393 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1394 if (IOM_SUCCESS(rcStrict))
1395 {
1396 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1397 pCtx->ADDR_rCX = --uCounterReg;
1398 cLeftPage--;
1399 }
1400 if (rcStrict != VINF_SUCCESS)
1401 {
1402 if (IOM_SUCCESS(rcStrict))
1403 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1404 if (uCounterReg == 0)
1405 iemRegAddToRip(pIemCpu, cbInstr);
1406 return rcStrict;
1407 }
1408 } while ((int32_t)cLeftPage > 0);
1409 } while (uCounterReg != 0);
1410
1411 /*
1412 * Done.
1413 */
1414 iemRegAddToRip(pIemCpu, cbInstr);
1415 return VINF_SUCCESS;
1416}
1417
1418#endif /* OP_SIZE != 64-bit */
1419
1420
1421#undef OP_rAX
1422#undef OP_SIZE
1423#undef ADDR_SIZE
1424#undef ADDR_rDI
1425#undef ADDR_rSI
1426#undef ADDR_rCX
1427#undef ADDR_rIP
1428#undef ADDR2_TYPE
1429#undef ADDR_TYPE
1430#undef ADDR2_TYPE
1431