VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@43667

Last change on this file since 43667 was 42761, checked in by vboxsync, 12 years ago

IEM: Bail out on REP STOS and other string ops when handler bypassing is enabled. (Will fix later.)

1/* $Id: IEMAllCImplStrInstr.cpp.h 42761 2012-08-10 18:23:20Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50#else
51# error "Bad ADDR_SIZE."
52#endif
53#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
54
55
56/**
57 * Implements 'REPE CMPS'.
58 */
59IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
60{
61 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
62
63 /*
64 * Setup.
65 */
66 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
67 if (uCounterReg == 0)
68 {
69 iemRegAddToRip(pIemCpu, cbInstr);
70 return VINF_SUCCESS;
71 }
72
73 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
74 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
75 if (rcStrict != VINF_SUCCESS)
76 return rcStrict;
77
78 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
79 if (rcStrict != VINF_SUCCESS)
80 return rcStrict;
81
82 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
83 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
84 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
85 uint32_t uEFlags = pCtx->eflags.u;
86
87 /*
88 * The loop.
89 */
90 do
91 {
92 /*
93 * Do segmentation and virtual page stuff.
94 */
95#if ADDR_SIZE != 64
96 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
97 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
98#else
99 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
100 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
101#endif
102 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
103 if (cLeftSrc1Page > uCounterReg)
104 cLeftSrc1Page = uCounterReg;
105 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
106 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
107
108 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
109 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
110#if ADDR_SIZE != 64
111 && uSrc1AddrReg < pSrc1Hid->u32Limit
112 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
113 && uSrc2AddrReg < pCtx->es.u32Limit
114 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
115#endif
116 )
117 {
118 RTGCPHYS GCPhysSrc1Mem;
119 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
120 if (rcStrict != VINF_SUCCESS)
121 return rcStrict;
122
123 RTGCPHYS GCPhysSrc2Mem;
124 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
125 if (rcStrict != VINF_SUCCESS)
126 return rcStrict;
127
128 /*
129 * If we can map the page without trouble, do block processing
130 * until the end of the current page.
131 */
132 PGMPAGEMAPLOCK PgLockSrc2Mem;
133 OP_TYPE const *puSrc2Mem;
134 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
135 if (rcStrict == VINF_SUCCESS)
136 {
137 PGMPAGEMAPLOCK PgLockSrc1Mem;
138 OP_TYPE const *puSrc1Mem;
139 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
140 if (rcStrict == VINF_SUCCESS)
141 {
142 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
143 {
144 /* All matches, only compare the last item to get the right eflags. */
145 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
146 uSrc1AddrReg += cLeftPage * cbIncr;
147 uSrc2AddrReg += cLeftPage * cbIncr;
148 uCounterReg -= cLeftPage;
149 }
150 else
151 {
152 /* Some mismatch, compare each item (and keep volatile
153 memory in mind). */
154 uint32_t off = 0;
155 do
156 {
157 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
158 off++;
159 } while ( off < cLeftPage
160 && (uEFlags & X86_EFL_ZF));
161 uSrc1AddrReg += cbIncr * off;
162 uSrc2AddrReg += cbIncr * off;
163 uCounterReg -= off;
164 }
165
166 /* Update the registers before looping. */
167 pCtx->ADDR_rCX = uCounterReg;
168 pCtx->ADDR_rSI = uSrc1AddrReg;
169 pCtx->ADDR_rDI = uSrc2AddrReg;
170 pCtx->eflags.u = uEFlags;
171
172 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
173 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
174 continue;
175 }
176 }
177 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
178 }
179
180 /*
181 * Fallback - slow processing till the end of the current page.
182 * In the cross-page boundary case we will end up here with cLeftPage
183 * as 0, and then execute the loop once.
184 */
185 do
186 {
187 OP_TYPE uValue1;
188 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
189 if (rcStrict != VINF_SUCCESS)
190 return rcStrict;
191 OP_TYPE uValue2;
192 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
193 if (rcStrict != VINF_SUCCESS)
194 return rcStrict;
195 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
196
197 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
198 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
199 pCtx->ADDR_rCX = --uCounterReg;
200 pCtx->eflags.u = uEFlags;
201 cLeftPage--;
202 } while ( (int32_t)cLeftPage > 0
203 && (uEFlags & X86_EFL_ZF));
204 } while ( uCounterReg != 0
205 && (uEFlags & X86_EFL_ZF));
206
207 /*
208 * Done.
209 */
210 iemRegAddToRip(pIemCpu, cbInstr);
211 return VINF_SUCCESS;
212}
213
214
215/**
216 * Implements 'REPNE CMPS'.
217 */
218IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
219{
220 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
221
222 /*
223 * Setup.
224 */
225 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
226 if (uCounterReg == 0)
227 {
228 iemRegAddToRip(pIemCpu, cbInstr);
229 return VINF_SUCCESS;
230 }
231
232 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
233 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
234 if (rcStrict != VINF_SUCCESS)
235 return rcStrict;
236
237 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
238 if (rcStrict != VINF_SUCCESS)
239 return rcStrict;
240
241 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
242 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
243 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
244 uint32_t uEFlags = pCtx->eflags.u;
245
246 /*
247 * The loop.
248 */
249 do
250 {
251 /*
252 * Do segmentation and virtual page stuff.
253 */
254#if ADDR_SIZE != 64
255 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
256 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
257#else
258 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
259 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
260#endif
261 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
262 if (cLeftSrc1Page > uCounterReg)
263 cLeftSrc1Page = uCounterReg;
264 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
265 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
266
267 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
268 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
269#if ADDR_SIZE != 64
270 && uSrc1AddrReg < pSrc1Hid->u32Limit
271 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
272 && uSrc2AddrReg < pCtx->es.u32Limit
273 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
274#endif
275 )
276 {
277 RTGCPHYS GCPhysSrc1Mem;
278 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
279 if (rcStrict != VINF_SUCCESS)
280 return rcStrict;
281
282 RTGCPHYS GCPhysSrc2Mem;
283 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
284 if (rcStrict != VINF_SUCCESS)
285 return rcStrict;
286
287 /*
288 * If we can map the page without trouble, do block processing
289 * until the end of the current page.
290 */
291 OP_TYPE const *puSrc2Mem;
292 PGMPAGEMAPLOCK PgLockSrc2Mem;
293 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
294 if (rcStrict == VINF_SUCCESS)
295 {
296 OP_TYPE const *puSrc1Mem;
297 PGMPAGEMAPLOCK PgLockSrc1Mem;
298 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
299 if (rcStrict == VINF_SUCCESS)
300 {
301 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
302 {
303 /* All matches, only compare the last item to get the right eflags. */
304 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
305 uSrc1AddrReg += cLeftPage * cbIncr;
306 uSrc2AddrReg += cLeftPage * cbIncr;
307 uCounterReg -= cLeftPage;
308 }
309 else
310 {
311 /* Some mismatch, compare each item (and keep volatile
312 memory in mind). */
313 uint32_t off = 0;
314 do
315 {
316 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
317 off++;
318 } while ( off < cLeftPage
319 && !(uEFlags & X86_EFL_ZF));
320 uSrc1AddrReg += cbIncr * off;
321 uSrc2AddrReg += cbIncr * off;
322 uCounterReg -= off;
323 }
324
325 /* Update the registers before looping. */
326 pCtx->ADDR_rCX = uCounterReg;
327 pCtx->ADDR_rSI = uSrc1AddrReg;
328 pCtx->ADDR_rDI = uSrc2AddrReg;
329 pCtx->eflags.u = uEFlags;
330
331 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
332 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
333 continue;
334 }
335 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
336 }
337 }
338
339 /*
340 * Fallback - slow processing till the end of the current page.
341 * In the cross-page boundary case we will end up here with cLeftPage
342 * as 0, and then execute the loop once.
343 */
344 do
345 {
346 OP_TYPE uValue1;
347 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
348 if (rcStrict != VINF_SUCCESS)
349 return rcStrict;
350 OP_TYPE uValue2;
351 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
352 if (rcStrict != VINF_SUCCESS)
353 return rcStrict;
354 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
355
356 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
357 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
358 pCtx->ADDR_rCX = --uCounterReg;
359 pCtx->eflags.u = uEFlags;
360 cLeftPage--;
361 } while ( (int32_t)cLeftPage > 0
362 && !(uEFlags & X86_EFL_ZF));
363 } while ( uCounterReg != 0
364 && !(uEFlags & X86_EFL_ZF));
365
366 /*
367 * Done.
368 */
369 iemRegAddToRip(pIemCpu, cbInstr);
370 return VINF_SUCCESS;
371}
372
373
374/**
375 * Implements 'REPE SCAS'.
376 */
377IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
378{
379 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
380
381 /*
382 * Setup.
383 */
384 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
385 if (uCounterReg == 0)
386 {
387 iemRegAddToRip(pIemCpu, cbInstr);
388 return VINF_SUCCESS;
389 }
390
391 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
392 if (rcStrict != VINF_SUCCESS)
393 return rcStrict;
394
395 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
396 OP_TYPE const uValueReg = pCtx->OP_rAX;
397 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
398 uint32_t uEFlags = pCtx->eflags.u;
399
400 /*
401 * The loop.
402 */
403 do
404 {
405 /*
406 * Do segmentation and virtual page stuff.
407 */
408#if ADDR_SIZE != 64
409 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
410#else
411 uint64_t uVirtAddr = uAddrReg;
412#endif
413 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
414 if (cLeftPage > uCounterReg)
415 cLeftPage = uCounterReg;
416 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
417 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
418#if ADDR_SIZE != 64
419 && uAddrReg < pCtx->es.u32Limit
420 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
421#endif
422 )
423 {
424 RTGCPHYS GCPhysMem;
425 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
426 if (rcStrict != VINF_SUCCESS)
427 return rcStrict;
428
429 /*
430 * If we can map the page without trouble, do block processing
431 * until the end of the current page.
432 */
433 PGMPAGEMAPLOCK PgLockMem;
434 OP_TYPE const *puMem;
435 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 /* Search till we find a mismatching item. */
439 OP_TYPE uTmpValue;
440 bool fQuit;
441 uint32_t i = 0;
442 do
443 {
444 uTmpValue = puMem[i++];
445 fQuit = uTmpValue != uValueReg;
446 } while (i < cLeftPage && !fQuit);
447
448 /* Update the regs. */
449 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
450 pCtx->ADDR_rCX = uCounterReg -= i;
451 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
452 pCtx->eflags.u = uEFlags;
453 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
454 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
455 if (fQuit)
456 break;
457
458
459 /* If unaligned, we drop thru and do the page crossing access
460 below. Otherwise, do the next page. */
461 if (!(uVirtAddr & (OP_SIZE - 1)))
462 continue;
463 if (uCounterReg == 0)
464 break;
465 cLeftPage = 0;
466 }
467 }
468
469 /*
470 * Fallback - slow processing till the end of the current page.
471 * In the cross-page boundary case we will end up here with cLeftPage
472 * as 0, and then execute the loop once.
473 */
474 do
475 {
476 OP_TYPE uTmpValue;
477 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
478 if (rcStrict != VINF_SUCCESS)
479 return rcStrict;
480 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
481
482 pCtx->ADDR_rDI = uAddrReg += cbIncr;
483 pCtx->ADDR_rCX = --uCounterReg;
484 pCtx->eflags.u = uEFlags;
485 cLeftPage--;
486 } while ( (int32_t)cLeftPage > 0
487 && (uEFlags & X86_EFL_ZF));
488 } while ( uCounterReg != 0
489 && (uEFlags & X86_EFL_ZF));
490
491 /*
492 * Done.
493 */
494 iemRegAddToRip(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements 'REPNE SCAS'.
501 */
502IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
503{
504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
505
506 /*
507 * Setup.
508 */
509 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
510 if (uCounterReg == 0)
511 {
512 iemRegAddToRip(pIemCpu, cbInstr);
513 return VINF_SUCCESS;
514 }
515
516 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
517 if (rcStrict != VINF_SUCCESS)
518 return rcStrict;
519
520 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
521 OP_TYPE const uValueReg = pCtx->OP_rAX;
522 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
523 uint32_t uEFlags = pCtx->eflags.u;
524
525 /*
526 * The loop.
527 */
528 do
529 {
530 /*
531 * Do segmentation and virtual page stuff.
532 */
533#if ADDR_SIZE != 64
534 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
535#else
536 uint64_t uVirtAddr = uAddrReg;
537#endif
538 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
539 if (cLeftPage > uCounterReg)
540 cLeftPage = uCounterReg;
541 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
542 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
543#if ADDR_SIZE != 64
544 && uAddrReg < pCtx->es.u32Limit
545 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
546#endif
547 )
548 {
549 RTGCPHYS GCPhysMem;
550 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
551 if (rcStrict != VINF_SUCCESS)
552 return rcStrict;
553
554 /*
555 * If we can map the page without trouble, do block processing
556 * until the end of the current page.
557 */
558 PGMPAGEMAPLOCK PgLockMem;
559 OP_TYPE const *puMem;
560 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
561 if (rcStrict == VINF_SUCCESS)
562 {
563 /* Search till we find a matching item. */
564 OP_TYPE uTmpValue;
565 bool fQuit;
566 uint32_t i = 0;
567 do
568 {
569 uTmpValue = puMem[i++];
570 fQuit = uTmpValue == uValueReg;
571 } while (i < cLeftPage && !fQuit);
572
573 /* Update the regs. */
574 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
575 pCtx->ADDR_rCX = uCounterReg -= i;
576 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
577 pCtx->eflags.u = uEFlags;
578 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
579 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
580 if (fQuit)
581 break;
582
583
584 /* If unaligned, we drop thru and do the page crossing access
585 below. Otherwise, do the next page. */
586 if (!(uVirtAddr & (OP_SIZE - 1)))
587 continue;
588 if (uCounterReg == 0)
589 break;
590 cLeftPage = 0;
591 }
592 }
593
594 /*
595 * Fallback - slow processing till the end of the current page.
596 * In the cross-page boundary case we will end up here with cLeftPage
597 * as 0, and then execute the loop once.
598 */
599 do
600 {
601 OP_TYPE uTmpValue;
602 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
603 if (rcStrict != VINF_SUCCESS)
604 return rcStrict;
605 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
606 pCtx->ADDR_rDI = uAddrReg += cbIncr;
607 pCtx->ADDR_rCX = --uCounterReg;
608 pCtx->eflags.u = uEFlags;
609 cLeftPage--;
610 } while ( (int32_t)cLeftPage > 0
611 && !(uEFlags & X86_EFL_ZF));
612 } while ( uCounterReg != 0
613 && !(uEFlags & X86_EFL_ZF));
614
615 /*
616 * Done.
617 */
618 iemRegAddToRip(pIemCpu, cbInstr);
619 return VINF_SUCCESS;
620}
621
622
623
624
625/**
626 * Implements 'REP MOVS'.
627 */
628IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
629{
630 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
631
632 /*
633 * Setup.
634 */
635 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
636 if (uCounterReg == 0)
637 {
638 iemRegAddToRip(pIemCpu, cbInstr);
639 return VINF_SUCCESS;
640 }
641
642 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
643 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
644 if (rcStrict != VINF_SUCCESS)
645 return rcStrict;
646
647 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
648 if (rcStrict != VINF_SUCCESS)
649 return rcStrict;
650
651 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
652 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
653 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
654
655 /*
656 * Be careful with handler bypassing.
657 */
658 if (pIemCpu->fBypassHandlers)
659 {
660 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
661 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
662 }
663
664 /*
665 * If we're reading back what we write, we have to let the verification code
666 * know, so it doesn't raise a false positive.
667 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
668 */
669#ifdef IEM_VERIFICATION_MODE_FULL
670 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
671 && (cbIncr > 0
672 ? uSrcAddrReg <= uDstAddrReg
673 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
674 : uDstAddrReg <= uSrcAddrReg
675 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
676 pIemCpu->fOverlappingMovs = true;
677#endif
678
679 /*
680 * The loop.
681 */
682 do
683 {
684 /*
685 * Do segmentation and virtual page stuff.
686 */
687#if ADDR_SIZE != 64
688 ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
689 ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->es.u64Base + uDstAddrReg;
690#else
691 uint64_t uVirtSrcAddr = uSrcAddrReg;
692 uint64_t uVirtDstAddr = uDstAddrReg;
693#endif
694 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
695 if (cLeftSrcPage > uCounterReg)
696 cLeftSrcPage = uCounterReg;
697 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
698 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
699
700 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
701 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
702#if ADDR_SIZE != 64
703 && uSrcAddrReg < pSrcHid->u32Limit
704 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
705 && uDstAddrReg < pCtx->es.u32Limit
706 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
707#endif
708 )
709 {
710 RTGCPHYS GCPhysSrcMem;
711 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
712 if (rcStrict != VINF_SUCCESS)
713 return rcStrict;
714
715 RTGCPHYS GCPhysDstMem;
716 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
717 if (rcStrict != VINF_SUCCESS)
718 return rcStrict;
719
720 /*
721 * If we can map the page without trouble, do block processing
722 * until the end of the current page.
723 */
724 PGMPAGEMAPLOCK PgLockDstMem;
725 OP_TYPE *puDstMem;
726 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
727 if (rcStrict == VINF_SUCCESS)
728 {
729 PGMPAGEMAPLOCK PgLockSrcMem;
730 OP_TYPE const *puSrcMem;
731 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
732 if (rcStrict == VINF_SUCCESS)
733 {
734 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
735 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
736
737 /* Perform the operation exactly (don't use memcpy to avoid
738 having to consider how its implementation would affect
739 any overlapping source and destination area). */
740 OP_TYPE const *puSrcCur = puSrcMem;
741 OP_TYPE *puDstCur = puDstMem;
742 uint32_t cTodo = cLeftPage;
743 while (cTodo-- > 0)
744 *puDstCur++ = *puSrcCur++;
745
746 /* Update the registers. */
747 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
748 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
749 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
750
751 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
752 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
753 continue;
754 }
755 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
756 }
757 }
758
759 /*
760 * Fallback - slow processing till the end of the current page.
761 * In the cross-page boundary case we will end up here with cLeftPage
762 * as 0, and then execute the loop once.
763 */
764 do
765 {
766 OP_TYPE uValue;
767 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
768 if (rcStrict != VINF_SUCCESS)
769 return rcStrict;
770 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
771 if (rcStrict != VINF_SUCCESS)
772 return rcStrict;
773
774 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
775 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
776 pCtx->ADDR_rCX = --uCounterReg;
777 cLeftPage--;
778 } while ((int32_t)cLeftPage > 0);
779 } while (uCounterReg != 0);
780
781 /*
782 * Done.
783 */
784 iemRegAddToRip(pIemCpu, cbInstr);
785 return VINF_SUCCESS;
786}
787
788
789/**
790 * Implements 'REP STOS'.
791 */
792IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
793{
794 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
795
796 /*
797 * Setup.
798 */
799 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
800 if (uCounterReg == 0)
801 {
802 iemRegAddToRip(pIemCpu, cbInstr);
803 return VINF_SUCCESS;
804 }
805
806 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
807 if (rcStrict != VINF_SUCCESS)
808 return rcStrict;
809
810 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
811 OP_TYPE const uValue = pCtx->OP_rAX;
812 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
813
814 /*
815 * Be careful with handler bypassing.
816 */
817 /** @todo Permit doing a page if correctly aligned. */
818 if (pIemCpu->fBypassHandlers)
819 {
820 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
821 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
822 }
823
824 /*
825 * The loop.
826 */
827 do
828 {
829 /*
830 * Do segmentation and virtual page stuff.
831 */
832#if ADDR_SIZE != 64
833 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
834#else
835 uint64_t uVirtAddr = uAddrReg;
836#endif
837 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
838 if (cLeftPage > uCounterReg)
839 cLeftPage = uCounterReg;
840 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
841 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
842#if ADDR_SIZE != 64
843 && uAddrReg < pCtx->es.u32Limit
844 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
845#endif
846 )
847 {
848 RTGCPHYS GCPhysMem;
849 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
850 if (rcStrict != VINF_SUCCESS)
851 return rcStrict;
852
853 /*
854 * If we can map the page without trouble, do block processing
855 * until the end of the current page.
856 */
857 PGMPAGEMAPLOCK PgLockMem;
858 OP_TYPE *puMem;
859 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
860 if (rcStrict == VINF_SUCCESS)
861 {
862 /* Update the regs first so we can loop on cLeftPage. */
863 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
864 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
865
866 /* Do the memsetting. */
867#if OP_SIZE == 8
868 memset(puMem, uValue, cLeftPage);
869/*#elif OP_SIZE == 32
870 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
871#else
872 while (cLeftPage-- > 0)
873 *puMem++ = uValue;
874#endif
875
876 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
877
878 /* If unaligned, we drop thru and do the page crossing access
879 below. Otherwise, do the next page. */
880 if (!(uVirtAddr & (OP_SIZE - 1)))
881 continue;
882 if (uCounterReg == 0)
883 break;
884 cLeftPage = 0;
885 }
886 }
887
888 /*
889 * Fallback - slow processing till the end of the current page.
890 * In the cross-page boundary case we will end up here with cLeftPage
891 * as 0, and then execute the loop once.
892 */
893 do
894 {
895 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
896 if (rcStrict != VINF_SUCCESS)
897 return rcStrict;
898 pCtx->ADDR_rDI = uAddrReg += cbIncr;
899 pCtx->ADDR_rCX = --uCounterReg;
900 cLeftPage--;
901 } while ((int32_t)cLeftPage > 0);
902 } while (uCounterReg != 0);
903
904 /*
905 * Done.
906 */
907 iemRegAddToRip(pIemCpu, cbInstr);
908 return VINF_SUCCESS;
909}
910
911
912/**
913 * Implements 'REP LODS'.
914 */
915IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
916{
917 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
918
919 /*
920 * Setup.
921 */
922 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
923 if (uCounterReg == 0)
924 {
925 iemRegAddToRip(pIemCpu, cbInstr);
926 return VINF_SUCCESS;
927 }
928
929 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
930 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
931 if (rcStrict != VINF_SUCCESS)
932 return rcStrict;
933
934 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
935 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
936
937 /*
938 * The loop.
939 */
940 do
941 {
942 /*
943 * Do segmentation and virtual page stuff.
944 */
945#if ADDR_SIZE != 64
946 ADDR2_TYPE uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg;
947#else
948 uint64_t uVirtAddr = uAddrReg;
949#endif
950 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
951 if (cLeftPage > uCounterReg)
952 cLeftPage = uCounterReg;
953 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
954 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
955#if ADDR_SIZE != 64
956 && uAddrReg < pSrcHid->u32Limit
957 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
958#endif
959 )
960 {
961 RTGCPHYS GCPhysMem;
962 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
963 if (rcStrict != VINF_SUCCESS)
964 return rcStrict;
965
966 /*
967 * If we can map the page without trouble, we can get away with
968 * just reading the last value on the page.
969 */
970 PGMPAGEMAPLOCK PgLockMem;
971 OP_TYPE const *puMem;
972 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
973 if (rcStrict == VINF_SUCCESS)
974 {
975 /* Only get the last byte, the rest doesn't matter in direct access mode. */
976#if OP_SIZE == 32
977 pCtx->rax = puMem[cLeftPage - 1];
978#else
979 pCtx->OP_rAX = puMem[cLeftPage - 1];
980#endif
981 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
982 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
983 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
984
985 /* If unaligned, we drop thru and do the page crossing access
986 below. Otherwise, do the next page. */
987 if (!(uVirtAddr & (OP_SIZE - 1)))
988 continue;
989 if (uCounterReg == 0)
990 break;
991 cLeftPage = 0;
992 }
993 }
994
995 /*
996 * Fallback - slow processing till the end of the current page.
997 * In the cross-page boundary case we will end up here with cLeftPage
998 * as 0, and then execute the loop once.
999 */
1000 do
1001 {
1002 OP_TYPE uTmpValue;
1003 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
1004 if (rcStrict != VINF_SUCCESS)
1005 return rcStrict;
1006#if OP_SIZE == 32
1007 pCtx->rax = uTmpValue;
1008#else
1009 pCtx->OP_rAX = uTmpValue;
1010#endif
1011 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1012 pCtx->ADDR_rCX = --uCounterReg;
1013 cLeftPage--;
1014 } while ((int32_t)cLeftPage > 0);
1015 if (rcStrict != VINF_SUCCESS)
1016 break;
1017 } while (uCounterReg != 0);
1018
1019 /*
1020 * Done.
1021 */
1022 iemRegAddToRip(pIemCpu, cbInstr);
1023 return VINF_SUCCESS;
1024}
1025
1026
1027#if OP_SIZE != 64
1028
1029/**
1030 * Implements 'INS' (no rep)
1031 */
1032IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1033{
1034 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1035 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1036 VBOXSTRICTRC rcStrict;
1037
1038 /*
1039 * Be careful with handler bypassing.
1040 */
1041 if (pIemCpu->fBypassHandlers)
1042 {
1043 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1044 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1045 }
1046
1047 /*
1048 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1049 * segmentation and finally any #PF due to virtual address translation.
1050 * ASSUMES nothing is read from the I/O port before traps are taken.
1051 */
1052 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1053 if (rcStrict != VINF_SUCCESS)
1054 return rcStrict;
1055
1056 OP_TYPE *puMem;
1057 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1058 if (rcStrict != VINF_SUCCESS)
1059 return rcStrict;
1060
1061 uint32_t u32Value;
1062 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1063 rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8);
1064 else
1065 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1066 if (IOM_SUCCESS(rcStrict))
1067 {
1068 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1069 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1070 {
1071 if (!pCtx->eflags.Bits.u1DF)
1072 pCtx->ADDR_rDI += OP_SIZE / 8;
1073 else
1074 pCtx->ADDR_rDI -= OP_SIZE / 8;
1075 iemRegAddToRip(pIemCpu, cbInstr);
1076 }
1077 /* iemMemMap already checked permissions, so this may only be real errors
1078 or access handlers meddling. The access handler case is going to
1079 cause misbehavior if the instruction is re-interpreted or the like. So,
1080 we fail with an internal error here instead. */
1081 else
1082 AssertLogRelFailedReturn(VERR_IEM_IPE_1);
1083 }
1084 return rcStrict;
1085}
1086
1087
1088/**
1089 * Implements 'REP INS'.
1090 */
1091IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1092{
1093 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1094 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1095
1096 /*
1097 * Setup.
1098 */
1099 uint16_t const u16Port = pCtx->dx;
1100 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1101 if (rcStrict != VINF_SUCCESS)
1102 return rcStrict;
1103
1104 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1105 if (uCounterReg == 0)
1106 {
1107 iemRegAddToRip(pIemCpu, cbInstr);
1108 return VINF_SUCCESS;
1109 }
1110
1111 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
1112 if (rcStrict != VINF_SUCCESS)
1113 return rcStrict;
1114
1115 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1116 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1117
1118 /*
1119 * Be careful with handler bypassing.
1120 */
1121 if (pIemCpu->fBypassHandlers)
1122 {
1123 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1124 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1125 }
1126
1127 /*
1128 * The loop.
1129 */
1130 do
1131 {
1132 /*
1133 * Do segmentation and virtual page stuff.
1134 */
1135#if ADDR_SIZE != 64
1136 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
1137#else
1138 uint64_t uVirtAddr = uAddrReg;
1139#endif
1140 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1141 if (cLeftPage > uCounterReg)
1142 cLeftPage = uCounterReg;
1143 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1144 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1145#if ADDR_SIZE != 64
1146 && uAddrReg < pCtx->es.u32Limit
1147 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
1148#endif
1149 )
1150 {
1151 RTGCPHYS GCPhysMem;
1152 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1153 if (rcStrict != VINF_SUCCESS)
1154 return rcStrict;
1155
1156 /*
1157 * If we can map the page without trouble, we would've liked to use
1158 * a string I/O method to do the work, but the current IOM
1159 * interface doesn't match our current approach. So, do a regular
1160 * loop instead.
1161 */
1162 /** @todo Change the I/O manager interface to make use of
1163 * mapped buffers instead of leaving those bits to the
1164 * device implementation? */
1165 PGMPAGEMAPLOCK PgLockMem;
1166 OP_TYPE *puMem;
1167 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1168 if (rcStrict == VINF_SUCCESS)
1169 {
1170 uint32_t off = 0;
1171 while (off < cLeftPage)
1172 {
1173 uint32_t u32Value;
1174 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1175 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1176 else
1177 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1178 if (IOM_SUCCESS(rcStrict))
1179 {
1180 puMem[off] = (OP_TYPE)u32Value;
1181 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1182 pCtx->ADDR_rCX = --uCounterReg;
1183 }
1184 if (rcStrict != VINF_SUCCESS)
1185 {
1186 if (IOM_SUCCESS(rcStrict))
1187 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1188 if (uCounterReg == 0)
1189 iemRegAddToRip(pIemCpu, cbInstr);
1190 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1191 return rcStrict;
1192 }
1193 off++;
1194 }
1195 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1196
1197 /* If unaligned, we drop thru and do the page crossing access
1198 below. Otherwise, do the next page. */
1199 if (!(uVirtAddr & (OP_SIZE - 1)))
1200 continue;
1201 if (uCounterReg == 0)
1202 break;
1203 cLeftPage = 0;
1204 }
1205 }
1206
1207 /*
1208 * Fallback - slow processing till the end of the current page.
1209 * In the cross-page boundary case we will end up here with cLeftPage
1210 * as 0, and then execute the loop once.
1211 *
1212 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1213 * I/O port, otherwise it wouldn't really be restartable.
1214 */
1215 /** @todo investigate what the CPU actually does with \#PF/\#GP
1216 * during INS. */
1217 do
1218 {
1219 OP_TYPE *puMem;
1220 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1221 if (rcStrict != VINF_SUCCESS)
1222 return rcStrict;
1223
1224 uint32_t u32Value;
1225 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1226 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1227 else
1228 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1229 if (!IOM_SUCCESS(rcStrict))
1230 return rcStrict;
1231
1232 *puMem = (OP_TYPE)u32Value;
1233 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1234 AssertLogRelReturn(rcStrict2 == VINF_SUCCESS, VERR_IEM_IPE_1); /* See non-rep version. */
1235
1236 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1237 pCtx->ADDR_rCX = --uCounterReg;
1238
1239 cLeftPage--;
1240 if (rcStrict != VINF_SUCCESS)
1241 {
1242 if (IOM_SUCCESS(rcStrict))
1243 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1244 if (uCounterReg == 0)
1245 iemRegAddToRip(pIemCpu, cbInstr);
1246 return rcStrict;
1247 }
1248 } while ((int32_t)cLeftPage > 0);
1249 } while (uCounterReg != 0);
1250
1251 /*
1252 * Done.
1253 */
1254 iemRegAddToRip(pIemCpu, cbInstr);
1255 return VINF_SUCCESS;
1256}
1257
1258
1259/**
1260 * Implements 'OUTS' (no rep)
1261 */
1262IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1263{
1264 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1265 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1266 VBOXSTRICTRC rcStrict;
1267
1268 /*
1269 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1270 * segmentation and finally any #PF due to virtual address translation.
1271 * ASSUMES nothing is written to the I/O port before traps are taken.
1272 */
1273 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1274 if (rcStrict != VINF_SUCCESS)
1275 return rcStrict;
1276
1277 OP_TYPE uValue;
1278 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1279 if (rcStrict == VINF_SUCCESS)
1280 {
1281 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1282 rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8);
1283 else
1284 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1285 if (IOM_SUCCESS(rcStrict))
1286 {
1287 if (!pCtx->eflags.Bits.u1DF)
1288 pCtx->ADDR_rSI += OP_SIZE / 8;
1289 else
1290 pCtx->ADDR_rSI -= OP_SIZE / 8;
1291 iemRegAddToRip(pIemCpu, cbInstr);
1292 if (rcStrict != VINF_SUCCESS)
1293 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1294 }
1295 }
1296 return rcStrict;
1297}
1298
1299
1300/**
1301 * Implements 'REP OUTS'.
1302 */
1303IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1304{
1305 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1306 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1307
1308 /*
1309 * Setup.
1310 */
1311 uint16_t const u16Port = pCtx->dx;
1312 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1313 if (rcStrict != VINF_SUCCESS)
1314 return rcStrict;
1315
1316 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1317 if (uCounterReg == 0)
1318 {
1319 iemRegAddToRip(pIemCpu, cbInstr);
1320 return VINF_SUCCESS;
1321 }
1322
1323 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1324 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg);
1325 if (rcStrict != VINF_SUCCESS)
1326 return rcStrict;
1327
1328 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1329 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1330
1331 /*
1332 * The loop.
1333 */
1334 do
1335 {
1336 /*
1337 * Do segmentation and virtual page stuff.
1338 */
1339#if ADDR_SIZE != 64
1340 ADDR2_TYPE uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg;
1341#else
1342 uint64_t uVirtAddr = uAddrReg;
1343#endif
1344 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1345 if (cLeftPage > uCounterReg)
1346 cLeftPage = uCounterReg;
1347 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1348 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1349#if ADDR_SIZE != 64
1350 && uAddrReg < pHid->u32Limit
1351 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit
1352#endif
1353 )
1354 {
1355 RTGCPHYS GCPhysMem;
1356 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1357 if (rcStrict != VINF_SUCCESS)
1358 return rcStrict;
1359
1360 /*
1361 * If we can map the page without trouble, we would've liked to use
1362 * a string I/O method to do the work, but the current IOM
1363 * interface doesn't match our current approach. So, do a regular
1364 * loop instead.
1365 */
1366 /** @todo Change the I/O manager interface to make use of
1367 * mapped buffers instead of leaving those bits to the
1368 * device implementation? */
1369 PGMPAGEMAPLOCK PgLockMem;
1370 OP_TYPE const *puMem;
1371 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1372 if (rcStrict == VINF_SUCCESS)
1373 {
1374 uint32_t off = 0;
1375 while (off < cLeftPage)
1376 {
1377 uint32_t u32Value = *puMem++;
1378 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1379 rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8);
1380 else
1381 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
1382 if (IOM_SUCCESS(rcStrict))
1383 {
1384 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1385 pCtx->ADDR_rCX = --uCounterReg;
1386 }
1387 if (rcStrict != VINF_SUCCESS)
1388 {
1389 if (IOM_SUCCESS(rcStrict))
1390 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1391 if (uCounterReg == 0)
1392 iemRegAddToRip(pIemCpu, cbInstr);
1393 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1394 return rcStrict;
1395 }
1396 off++;
1397 }
1398 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1399
1400 /* If unaligned, we drop thru and do the page crossing access
1401 below. Otherwise, do the next page. */
1402 if (!(uVirtAddr & (OP_SIZE - 1)))
1403 continue;
1404 if (uCounterReg == 0)
1405 break;
1406 cLeftPage = 0;
1407 }
1408 }
1409
1410 /*
1411 * Fallback - slow processing till the end of the current page.
1412 * In the cross-page boundary case we will end up here with cLeftPage
1413 * as 0, and then execute the loop once.
1414 *
1415 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1416 * I/O port, otherwise it wouldn't really be restartable.
1417 */
1418 /** @todo investigate what the CPU actually does with \#PF/\#GP
1419 * during OUTS. */
1420 do
1421 {
1422 OP_TYPE uValue;
1423 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1424 if (rcStrict != VINF_SUCCESS)
1425 return rcStrict;
1426
1427 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1428 rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8);
1429 else
1430 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1431 if (IOM_SUCCESS(rcStrict))
1432 {
1433 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1434 pCtx->ADDR_rCX = --uCounterReg;
1435 cLeftPage--;
1436 }
1437 if (rcStrict != VINF_SUCCESS)
1438 {
1439 if (IOM_SUCCESS(rcStrict))
1440 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1441 if (uCounterReg == 0)
1442 iemRegAddToRip(pIemCpu, cbInstr);
1443 return rcStrict;
1444 }
1445 } while ((int32_t)cLeftPage > 0);
1446 } while (uCounterReg != 0);
1447
1448 /*
1449 * Done.
1450 */
1451 iemRegAddToRip(pIemCpu, cbInstr);
1452 return VINF_SUCCESS;
1453}
1454
1455#endif /* OP_SIZE != 64-bit */
1456
1457
1458#undef OP_rAX
1459#undef OP_SIZE
1460#undef ADDR_SIZE
1461#undef ADDR_rDI
1462#undef ADDR_rSI
1463#undef ADDR_rCX
1464#undef ADDR_rIP
1465#undef ADDR2_TYPE
1466#undef ADDR_TYPE
1467#undef ADDR2_TYPE
1468
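This .cpp.h file is a template and is not compiled on its own: it is #included with OP_SIZE and ADDR_SIZE already defined, once for each operand-size/address-size combination, and the #undef block at the end clears the parameters so the file can be included again with a new pair (it also clears helpers such as ADDR_rIP that the include site may define). The fragment below is a minimal sketch of what such an include site could look like; the particular size combinations shown and the comments about which symbols get emitted are assumptions derived from the RT_CONCAT4 names in the listing, not taken from this page.

/* Hypothetical include site -- illustrative only, not part of IEMAllCImplStrInstr.cpp.h. */
#define OP_SIZE     16  /* operand size in bits: 8, 16, 32 or 64 */
#define ADDR_SIZE   32  /* address size in bits: 16, 32 or 64 */
#include "IEMAllCImplStrInstr.cpp.h"    /* defines e.g. iemCImpl_repe_cmps_op16_addr32 */

/* The trailing #undef block allows re-inclusion with a different parameter pair. */
#define OP_SIZE     32
#define ADDR_SIZE   32
#include "IEMAllCImplStrInstr.cpp.h"    /* defines e.g. iemCImpl_rep_movs_op32_addr32 */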