/* $Id: IEMAllCImplStrInstr.cpp.h 42761 2012-08-10 18:23:20Z vboxsync $ */
/** @file
 * IEM - String Instruction Implementation Code Template.
 */

/*
 * Copyright (C) 2011-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
|
---|
| 17 |
|
---|
| 18 |
|
---|
| 19 | /*******************************************************************************
|
---|
| 20 | * Defined Constants And Macros *
|
---|
| 21 | *******************************************************************************/
|
---|
| 22 | #if OP_SIZE == 8
|
---|
| 23 | # define OP_rAX al
|
---|
| 24 | #elif OP_SIZE == 16
|
---|
| 25 | # define OP_rAX ax
|
---|
| 26 | #elif OP_SIZE == 32
|
---|
| 27 | # define OP_rAX eax
|
---|
| 28 | #elif OP_SIZE == 64
|
---|
| 29 | # define OP_rAX rax
|
---|
| 30 | #else
|
---|
| 31 | # error "Bad OP_SIZE."
|
---|
| 32 | #endif
|
---|
| 33 | #define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
|
---|
| 34 |
|
---|
| 35 | #if ADDR_SIZE == 16
|
---|
| 36 | # define ADDR_rDI di
|
---|
| 37 | # define ADDR_rSI si
|
---|
| 38 | # define ADDR_rCX cx
|
---|
| 39 | # define ADDR2_TYPE uint32_t
|
---|
| 40 | #elif ADDR_SIZE == 32
|
---|
| 41 | # define ADDR_rDI edi
|
---|
| 42 | # define ADDR_rSI esi
|
---|
| 43 | # define ADDR_rCX ecx
|
---|
| 44 | # define ADDR2_TYPE uint32_t
|
---|
| 45 | #elif ADDR_SIZE == 64
|
---|
| 46 | # define ADDR_rDI rdi
|
---|
| 47 | # define ADDR_rSI rsi
|
---|
| 48 | # define ADDR_rCX rcx
|
---|
| 49 | # define ADDR2_TYPE uint64_t
|
---|
| 50 | #else
|
---|
| 51 | # error "Bad ADDR_SIZE."
|
---|
| 52 | #endif
|
---|
| 53 | #define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
|
---|
| 54 |
|
---|
| 55 |
|
---|
[36829] | 56 | /**
|
---|
| 57 | * Implements 'REPE CMPS'.
|
---|
| 58 | */
|
---|
| 59 | IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
|
---|
| 60 | {
|
---|
| 61 | PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
|
---|
[36768] | 62 |
|
---|
[36829] | 63 | /*
|
---|
| 64 | * Setup.
|
---|
| 65 | */
|
---|
[39958] | 66 | ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
|
---|
[36829] | 67 | if (uCounterReg == 0)
|
---|
[36833] | 68 | {
|
---|
| 69 | iemRegAddToRip(pIemCpu, cbInstr);
|
---|
[36829] | 70 | return VINF_SUCCESS;
|
---|
[36833] | 71 | }
|
---|
[36829] | 72 |
|
---|
[39958] | 73 | PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
|
---|
| 74 | VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
|
---|
[36829] | 75 | if (rcStrict != VINF_SUCCESS)
|
---|
| 76 | return rcStrict;
|
---|
| 77 |
|
---|
[41906] | 78 | rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
|
---|
[36829] | 79 | if (rcStrict != VINF_SUCCESS)
|
---|
| 80 | return rcStrict;
|
---|
| 81 |
|
---|
[39958] | 82 | int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
|
---|
[36829] | 83 | ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
|
---|
| 84 | ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
|
---|
[39958] | 85 | uint32_t uEFlags = pCtx->eflags.u;
|
---|
[36829] | 86 |
|
---|
| 87 | /*
|
---|
| 88 | * The loop.
|
---|
| 89 | */
|
---|
| 90 | do
|
---|
| 91 | {
|
---|
| 92 | /*
|
---|
| 93 | * Do segmentation and virtual page stuff.
|
---|
| 94 | */
|
---|
| 95 | #if ADDR_SIZE != 64
|
---|
[41906] | 96 | ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
|
---|
| 97 | ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
|
---|
[36829] | 98 | #else
|
---|
| 99 | uint64_t uVirtSrc1Addr = uSrc1AddrReg;
|
---|
| 100 | uint64_t uVirtSrc2Addr = uSrc2AddrReg;
|
---|
| 101 | #endif
|
---|
| 102 | uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
|
---|
| 103 | if (cLeftSrc1Page > uCounterReg)
|
---|
| 104 | cLeftSrc1Page = uCounterReg;
|
---|
| 105 | uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
|
---|
[39958] | 106 | uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
|
---|
[36829] | 107 |
|
---|
| 108 | if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
|
---|
| 109 | && cbIncr > 0 /** @todo Implement reverse direction string ops. */
|
---|
| 110 | #if ADDR_SIZE != 64
|
---|
| 111 | && uSrc1AddrReg < pSrc1Hid->u32Limit
|
---|
| 112 | && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
|
---|
[41906] | 113 | && uSrc2AddrReg < pCtx->es.u32Limit
|
---|
| 114 | && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
|
---|
[36829] | 115 | #endif
|
---|
| 116 | )
|
---|
| 117 | {
|
---|
| 118 | RTGCPHYS GCPhysSrc1Mem;
|
---|
| 119 | rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
|
---|
| 120 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 121 | return rcStrict;
|
---|
[36829] | 122 |
|
---|
| 123 | RTGCPHYS GCPhysSrc2Mem;
|
---|
| 124 | rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
|
---|
| 125 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 126 | return rcStrict;
|
---|
[36829] | 127 |
|
---|
| 128 | /*
|
---|
| 129 | * If we can map the page without trouble, do a block processing
|
---|
| 130 | * until the end of the current page.
|
---|
| 131 | */
|
---|
[42193] | 132 | PGMPAGEMAPLOCK PgLockSrc2Mem;
|
---|
[36829] | 133 | OP_TYPE const *puSrc2Mem;
|
---|
[42193] | 134 | rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
|
---|
[36829] | 135 | if (rcStrict == VINF_SUCCESS)
|
---|
| 136 | {
|
---|
[42193] | 137 | PGMPAGEMAPLOCK PgLockSrc1Mem;
|
---|
[36829] | 138 | OP_TYPE const *puSrc1Mem;
|
---|
[42193] | 139 | rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
|
---|
[36829] | 140 | if (rcStrict == VINF_SUCCESS)
|
---|
| 141 | {
|
---|
| 142 | if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
|
---|
| 143 | {
|
---|
| 144 | /* All matches, only compare the last itme to get the right eflags. */
|
---|
| 145 | RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
|
---|
| 146 | uSrc1AddrReg += cLeftPage * cbIncr;
|
---|
| 147 | uSrc2AddrReg += cLeftPage * cbIncr;
|
---|
[39958] | 148 | uCounterReg -= cLeftPage;
|
---|
[36829] | 149 | }
|
---|
| 150 | else
|
---|
| 151 | {
|
---|
| 152 | /* Some mismatch, compare each item (and keep volatile
|
---|
| 153 | memory in mind). */
|
---|
[39958] | 154 | uint32_t off = 0;
|
---|
[36829] | 155 | do
|
---|
| 156 | {
|
---|
[39958] | 157 | RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
|
---|
| 158 | off++;
|
---|
| 159 | } while ( off < cLeftPage
|
---|
[36829] | 160 | && (uEFlags & X86_EFL_ZF));
|
---|
[39958] | 161 | uSrc1AddrReg += cbIncr * off;
|
---|
| 162 | uSrc2AddrReg += cbIncr * off;
|
---|
| 163 | uCounterReg -= off;
|
---|
[36829] | 164 | }
|
---|
[39958] | 165 |
|
---|
| 166 | /* Update the registers before looping. */
|
---|
| 167 | pCtx->ADDR_rCX = uCounterReg;
|
---|
| 168 | pCtx->ADDR_rSI = uSrc1AddrReg;
|
---|
| 169 | pCtx->ADDR_rDI = uSrc2AddrReg;
|
---|
| 170 | pCtx->eflags.u = uEFlags;
|
---|
| 171 |
|
---|
[42193] | 172 | iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
|
---|
| 173 | iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
|
---|
[36829] | 174 | continue;
|
---|
| 175 | }
|
---|
| 176 | }
|
---|
[42193] | 177 | iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
|
---|
[36829] | 178 | }
|
---|
| 179 |
|
---|
| 180 | /*
|
---|
| 181 | * Fallback - slow processing till the end of the current page.
|
---|
| 182 | * In the cross page boundrary case we will end up here with cLeftPage
|
---|
| 183 | * as 0, we execute one loop then.
|
---|
| 184 | */
|
---|
| 185 | do
|
---|
| 186 | {
|
---|
| 187 | OP_TYPE uValue1;
|
---|
| 188 | rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
|
---|
| 189 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 190 | return rcStrict;
|
---|
[36829] | 191 | OP_TYPE uValue2;
|
---|
| 192 | rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
|
---|
| 193 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 194 | return rcStrict;
|
---|
[36829] | 195 | RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
|
---|
| 196 |
|
---|
[39958] | 197 | pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
|
---|
| 198 | pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
|
---|
| 199 | pCtx->ADDR_rCX = --uCounterReg;
|
---|
| 200 | pCtx->eflags.u = uEFlags;
|
---|
[36829] | 201 | cLeftPage--;
|
---|
| 202 | } while ( (int32_t)cLeftPage > 0
|
---|
| 203 | && (uEFlags & X86_EFL_ZF));
|
---|
| 204 | } while ( uCounterReg != 0
|
---|
| 205 | && (uEFlags & X86_EFL_ZF));
|
---|
| 206 |
|
---|
| 207 | /*
|
---|
[39958] | 208 | * Done.
|
---|
[36829] | 209 | */
|
---|
[39958] | 210 | iemRegAddToRip(pIemCpu, cbInstr);
|
---|
| 211 | return VINF_SUCCESS;
|
---|
[36829] | 212 | }
|
---|
| 213 |
|
---|
| 214 |
|
---|
[36768] | 215 | /**
|
---|
[36829] | 216 | * Implements 'REPNE CMPS'.
|
---|
| 217 | */
|
---|
| 218 | IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
|
---|
| 219 | {
|
---|
| 220 | PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
|
---|
| 221 |
|
---|
| 222 | /*
|
---|
| 223 | * Setup.
|
---|
| 224 | */
|
---|
| 225 | ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
|
---|
| 226 | if (uCounterReg == 0)
|
---|
[36833] | 227 | {
|
---|
| 228 | iemRegAddToRip(pIemCpu, cbInstr);
|
---|
[36829] | 229 | return VINF_SUCCESS;
|
---|
[36833] | 230 | }
|
---|
[36829] | 231 |
|
---|
| 232 | PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
|
---|
| 233 | VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
|
---|
| 234 | if (rcStrict != VINF_SUCCESS)
|
---|
| 235 | return rcStrict;
|
---|
| 236 |
|
---|
[41906] | 237 | rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
|
---|
[36829] | 238 | if (rcStrict != VINF_SUCCESS)
|
---|
| 239 | return rcStrict;
|
---|
| 240 |
|
---|
| 241 | int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
|
---|
| 242 | ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
|
---|
| 243 | ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
|
---|
| 244 | uint32_t uEFlags = pCtx->eflags.u;
|
---|
| 245 |
|
---|
| 246 | /*
|
---|
| 247 | * The loop.
|
---|
| 248 | */
|
---|
| 249 | do
|
---|
| 250 | {
|
---|
| 251 | /*
|
---|
| 252 | * Do segmentation and virtual page stuff.
|
---|
| 253 | */
|
---|
| 254 | #if ADDR_SIZE != 64
|
---|
[41906] | 255 | ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
|
---|
| 256 | ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
|
---|
[36829] | 257 | #else
|
---|
| 258 | uint64_t uVirtSrc1Addr = uSrc1AddrReg;
|
---|
| 259 | uint64_t uVirtSrc2Addr = uSrc2AddrReg;
|
---|
| 260 | #endif
|
---|
| 261 | uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
|
---|
| 262 | if (cLeftSrc1Page > uCounterReg)
|
---|
| 263 | cLeftSrc1Page = uCounterReg;
|
---|
| 264 | uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
|
---|
| 265 | uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
|
---|
| 266 |
|
---|
| 267 | if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
|
---|
| 268 | && cbIncr > 0 /** @todo Implement reverse direction string ops. */
|
---|
| 269 | #if ADDR_SIZE != 64
|
---|
| 270 | && uSrc1AddrReg < pSrc1Hid->u32Limit
|
---|
| 271 | && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
|
---|
[41906] | 272 | && uSrc2AddrReg < pCtx->es.u32Limit
|
---|
| 273 | && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
|
---|
[36829] | 274 | #endif
|
---|
| 275 | )
|
---|
| 276 | {
|
---|
| 277 | RTGCPHYS GCPhysSrc1Mem;
|
---|
| 278 | rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
|
---|
| 279 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 280 | return rcStrict;
|
---|
[36829] | 281 |
|
---|
| 282 | RTGCPHYS GCPhysSrc2Mem;
|
---|
| 283 | rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
|
---|
| 284 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 285 | return rcStrict;
|
---|
[36829] | 286 |
|
---|
| 287 | /*
|
---|
| 288 | * If we can map the page without trouble, do a block processing
|
---|
| 289 | * until the end of the current page.
|
---|
| 290 | */
|
---|
| 291 | OP_TYPE const *puSrc2Mem;
|
---|
[42193] | 292 | PGMPAGEMAPLOCK PgLockSrc2Mem;
|
---|
| 293 | rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
|
---|
[36829] | 294 | if (rcStrict == VINF_SUCCESS)
|
---|
| 295 | {
|
---|
| 296 | OP_TYPE const *puSrc1Mem;
|
---|
[42193] | 297 | PGMPAGEMAPLOCK PgLockSrc1Mem;
|
---|
| 298 | rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
|
---|
[36829] | 299 | if (rcStrict == VINF_SUCCESS)
|
---|
| 300 | {
|
---|
| 301 | if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
|
---|
| 302 | {
|
---|
[39958] | 303 | /* All matches, only compare the last item to get the right eflags. */
|
---|
[36829] | 304 | RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
|
---|
| 305 | uSrc1AddrReg += cLeftPage * cbIncr;
|
---|
| 306 | uSrc2AddrReg += cLeftPage * cbIncr;
|
---|
[39958] | 307 | uCounterReg -= cLeftPage;
|
---|
[36829] | 308 | }
|
---|
| 309 | else
|
---|
| 310 | {
|
---|
| 311 | /* Some mismatch, compare each item (and keep volatile
|
---|
| 312 | memory in mind). */
|
---|
[39958] | 313 | uint32_t off = 0;
|
---|
[36829] | 314 | do
|
---|
| 315 | {
|
---|
[39958] | 316 | RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
|
---|
| 317 | off++;
|
---|
| 318 | } while ( off < cLeftPage
|
---|
[36829] | 319 | && !(uEFlags & X86_EFL_ZF));
|
---|
[39958] | 320 | uSrc1AddrReg += cbIncr * off;
|
---|
| 321 | uSrc2AddrReg += cbIncr * off;
|
---|
| 322 | uCounterReg -= off;
|
---|
[36829] | 323 | }
|
---|
[39958] | 324 |
|
---|
| 325 | /* Update the registers before looping. */
|
---|
| 326 | pCtx->ADDR_rCX = uCounterReg;
|
---|
| 327 | pCtx->ADDR_rSI = uSrc1AddrReg;
|
---|
| 328 | pCtx->ADDR_rDI = uSrc2AddrReg;
|
---|
| 329 | pCtx->eflags.u = uEFlags;
|
---|
| 330 |
|
---|
[42193] | 331 | iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
|
---|
| 332 | iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
|
---|
[36829] | 333 | continue;
|
---|
| 334 | }
|
---|
[42193] | 335 | iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
|
---|
[36829] | 336 | }
|
---|
| 337 | }
|
---|
| 338 |
|
---|
| 339 | /*
|
---|
| 340 | * Fallback - slow processing till the end of the current page.
|
---|
| 341 | * In the cross page boundrary case we will end up here with cLeftPage
|
---|
| 342 | * as 0, we execute one loop then.
|
---|
| 343 | */
|
---|
| 344 | do
|
---|
| 345 | {
|
---|
| 346 | OP_TYPE uValue1;
|
---|
| 347 | rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
|
---|
| 348 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 349 | return rcStrict;
|
---|
[36829] | 350 | OP_TYPE uValue2;
|
---|
| 351 | rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
|
---|
| 352 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 353 | return rcStrict;
|
---|
[36829] | 354 | RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
|
---|
| 355 |
|
---|
[39958] | 356 | pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
|
---|
| 357 | pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
|
---|
| 358 | pCtx->ADDR_rCX = --uCounterReg;
|
---|
| 359 | pCtx->eflags.u = uEFlags;
|
---|
[36829] | 360 | cLeftPage--;
|
---|
| 361 | } while ( (int32_t)cLeftPage > 0
|
---|
| 362 | && !(uEFlags & X86_EFL_ZF));
|
---|
| 363 | } while ( uCounterReg != 0
|
---|
| 364 | && !(uEFlags & X86_EFL_ZF));
|
---|
| 365 |
|
---|
| 366 | /*
|
---|
[39958] | 367 | * Done.
|
---|
[36829] | 368 | */
|
---|
[39958] | 369 | iemRegAddToRip(pIemCpu, cbInstr);
|
---|
| 370 | return VINF_SUCCESS;
|
---|
[36829] | 371 | }
|
---|
| 372 |
|
---|
| 373 |
|
---|
/**
 * Implements 'REPE SCAS'.
 *
 * Scans the string at ES:xDI comparing each item against the accumulator
 * (rAX sub-register matching the operand size), repeating while xCX != 0
 * and ZF is set (item equals the accumulator).
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        /* A zero count makes the whole instruction a no-op; just skip it. */
        iemRegAddToRip(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    /* ES must be readable (SCAS always uses ES:xDI, no segment override). */
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* DF selects the walk direction; the step is one item in bytes. */
    int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    OP_TYPE const uValueReg = pCtx->OP_rAX;
    ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
    uint32_t uEFlags = pCtx->eflags.u;

    /*
     * The loop.  Each outer iteration handles at most one guest page.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
#if ADDR_SIZE != 64
        ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
#else
        uint64_t uVirtAddr = uAddrReg;
#endif
        /* Items before the address crosses its page boundary, clipped by the
           remaining counter. */
        uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
#if ADDR_SIZE != 64
            && uAddrReg < pCtx->es.u32Limit
            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
#endif
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do a block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Search till we find a mismatching item. */
                OP_TYPE uTmpValue;
                bool fQuit;
                uint32_t i = 0;
                do
                {
                    uTmpValue = puMem[i++];
                    fQuit = uTmpValue != uValueReg;
                } while (i < cLeftPage && !fQuit);

                /* Update the regs.  The cmp on the last item read produces
                   the architecturally correct EFLAGS (cmp only writes flags,
                   hence the const-cast on uValueReg is harmless). */
                RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
                pCtx->ADDR_rCX = uCounterReg -= i;
                pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
                pCtx->eflags.u = uEFlags;
                Assert(!(uEFlags & X86_EFL_ZF) == fQuit); /* fQuit <=> mismatch <=> ZF clear */
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
                if (fQuit)
                    break;


                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page.
                   NOTE(review): the mask uses OP_SIZE (bits) rather than the
                   item size in bytes, i.e. it is stricter than item alignment;
                   this only costs an extra slow round - confirm intent. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uTmpValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);

            /* Commit the guest registers after every item so that a fault on
               a subsequent item leaves the state architecturally consistent. */
            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            pCtx->eflags.u = uEFlags;
            cLeftPage--;
        } while (   (int32_t)cLeftPage > 0
                 && (uEFlags & X86_EFL_ZF));
    } while (   uCounterReg != 0
             && (uEFlags & X86_EFL_ZF));

    /*
     * Done.
     */
    iemRegAddToRip(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
|
---|
| 497 |
|
---|
| 498 |
|
---|
/**
 * Implements 'REPNE SCAS'.
 *
 * Scans the string at ES:xDI comparing each item against the accumulator
 * (rAX sub-register matching the operand size), repeating while xCX != 0
 * and ZF is clear; i.e. it stops at the first item EQUAL to the accumulator.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        /* A zero count makes the whole instruction a no-op; just skip it. */
        iemRegAddToRip(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    /* ES must be readable (SCAS always uses ES:xDI, no segment override). */
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* DF selects the walk direction; the step is one item in bytes. */
    int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    OP_TYPE const uValueReg = pCtx->OP_rAX;
    ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
    uint32_t uEFlags = pCtx->eflags.u;

    /*
     * The loop.  Each outer iteration handles at most one guest page.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
#if ADDR_SIZE != 64
        ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
#else
        uint64_t uVirtAddr = uAddrReg;
#endif
        /* Items before the address crosses its page boundary, clipped by the
           remaining counter. */
        uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
#if ADDR_SIZE != 64
            && uAddrReg < pCtx->es.u32Limit
            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
#endif
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do a block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Search till we find a matching item. */
                OP_TYPE uTmpValue;
                bool fQuit;
                uint32_t i = 0;
                do
                {
                    uTmpValue = puMem[i++];
                    fQuit = uTmpValue == uValueReg;
                } while (i < cLeftPage && !fQuit);

                /* Update the regs.  The cmp on the last item read produces
                   the architecturally correct EFLAGS (cmp only writes flags,
                   hence the const-cast on uValueReg is harmless). */
                RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
                pCtx->ADDR_rCX = uCounterReg -= i;
                pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
                pCtx->eflags.u = uEFlags;
                Assert(!!(uEFlags & X86_EFL_ZF) == fQuit); /* fQuit <=> match <=> ZF set */
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
                if (fQuit)
                    break;


                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page.
                   NOTE(review): the mask uses OP_SIZE (bits) rather than the
                   item size in bytes, i.e. it is stricter than item alignment;
                   this only costs an extra slow round - confirm intent. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uTmpValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
            /* Commit the guest registers after every item so that a fault on
               a subsequent item leaves the state architecturally consistent. */
            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            pCtx->eflags.u = uEFlags;
            cLeftPage--;
        } while (   (int32_t)cLeftPage > 0
                 && !(uEFlags & X86_EFL_ZF));
    } while (   uCounterReg != 0
             && !(uEFlags & X86_EFL_ZF));

    /*
     * Done.
     */
    iemRegAddToRip(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
|
---|
| 621 |
|
---|
| 622 |
|
---|
| 623 |
|
---|
| 624 |
|
---|
| 625 | /**
|
---|
[36768] | 626 | * Implements 'REP MOVS'.
|
---|
| 627 | */
|
---|
| 628 | IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
|
---|
| 629 | {
|
---|
| 630 | PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
|
---|
| 631 |
|
---|
| 632 | /*
|
---|
| 633 | * Setup.
|
---|
| 634 | */
|
---|
| 635 | ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
|
---|
| 636 | if (uCounterReg == 0)
|
---|
[36833] | 637 | {
|
---|
| 638 | iemRegAddToRip(pIemCpu, cbInstr);
|
---|
[36768] | 639 | return VINF_SUCCESS;
|
---|
[36833] | 640 | }
|
---|
[36768] | 641 |
|
---|
| 642 | PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
|
---|
| 643 | VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
|
---|
| 644 | if (rcStrict != VINF_SUCCESS)
|
---|
| 645 | return rcStrict;
|
---|
| 646 |
|
---|
[41906] | 647 | rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
|
---|
[36768] | 648 | if (rcStrict != VINF_SUCCESS)
|
---|
| 649 | return rcStrict;
|
---|
| 650 |
|
---|
| 651 | int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
|
---|
| 652 | ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
|
---|
| 653 | ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
|
---|
| 654 |
|
---|
| 655 | /*
|
---|
[42761] | 656 | * Be careful with handle bypassing.
|
---|
| 657 | */
|
---|
| 658 | if (pIemCpu->fBypassHandlers)
|
---|
| 659 | {
|
---|
| 660 | Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
|
---|
| 661 | return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
|
---|
| 662 | }
|
---|
| 663 |
|
---|
| 664 | /*
|
---|
[42621] | 665 | * If we're reading back what we write, we have to let the verfication code
|
---|
| 666 | * to prevent a false positive.
|
---|
| 667 | * Note! This doesn't take aliasing or wrapping into account - lazy bird.
|
---|
| 668 | */
|
---|
[42761] | 669 | #ifdef IEM_VERIFICATION_MODE_FULL
|
---|
[42621] | 670 | if ( IEM_VERIFICATION_ENABLED(pIemCpu)
|
---|
| 671 | && (cbIncr > 0
|
---|
| 672 | ? uSrcAddrReg <= uDstAddrReg
|
---|
| 673 | && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
|
---|
| 674 | : uDstAddrReg <= uSrcAddrReg
|
---|
| 675 | && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
|
---|
| 676 | pIemCpu->fOverlappingMovs = true;
|
---|
[42625] | 677 | #endif
|
---|
[42621] | 678 |
|
---|
| 679 | /*
|
---|
[36768] | 680 | * The loop.
|
---|
| 681 | */
|
---|
| 682 | do
|
---|
| 683 | {
|
---|
| 684 | /*
|
---|
| 685 | * Do segmentation and virtual page stuff.
|
---|
| 686 | */
|
---|
| 687 | #if ADDR_SIZE != 64
|
---|
[41906] | 688 | ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
|
---|
| 689 | ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->es.u64Base + uDstAddrReg;
|
---|
[36768] | 690 | #else
|
---|
| 691 | uint64_t uVirtSrcAddr = uSrcAddrReg;
|
---|
| 692 | uint64_t uVirtDstAddr = uDstAddrReg;
|
---|
| 693 | #endif
|
---|
| 694 | uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
|
---|
| 695 | if (cLeftSrcPage > uCounterReg)
|
---|
| 696 | cLeftSrcPage = uCounterReg;
|
---|
| 697 | uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
|
---|
| 698 | uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
|
---|
| 699 |
|
---|
| 700 | if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
|
---|
| 701 | && cbIncr > 0 /** @todo Implement reverse direction string ops. */
|
---|
| 702 | #if ADDR_SIZE != 64
|
---|
| 703 | && uSrcAddrReg < pSrcHid->u32Limit
|
---|
| 704 | && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
|
---|
[41906] | 705 | && uDstAddrReg < pCtx->es.u32Limit
|
---|
| 706 | && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
|
---|
[36768] | 707 | #endif
|
---|
| 708 | )
|
---|
| 709 | {
|
---|
| 710 | RTGCPHYS GCPhysSrcMem;
|
---|
| 711 | rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
|
---|
| 712 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 713 | return rcStrict;
|
---|
[36768] | 714 |
|
---|
| 715 | RTGCPHYS GCPhysDstMem;
|
---|
| 716 | rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
|
---|
| 717 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 718 | return rcStrict;
|
---|
[36768] | 719 |
|
---|
| 720 | /*
|
---|
| 721 | * If we can map the page without trouble, do a block processing
|
---|
| 722 | * until the end of the current page.
|
---|
| 723 | */
|
---|
[42193] | 724 | PGMPAGEMAPLOCK PgLockDstMem;
|
---|
[36768] | 725 | OP_TYPE *puDstMem;
|
---|
[42193] | 726 | rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
|
---|
[36768] | 727 | if (rcStrict == VINF_SUCCESS)
|
---|
| 728 | {
|
---|
[42193] | 729 | PGMPAGEMAPLOCK PgLockSrcMem;
|
---|
[36768] | 730 | OP_TYPE const *puSrcMem;
|
---|
[42193] | 731 | rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
|
---|
[36768] | 732 | if (rcStrict == VINF_SUCCESS)
|
---|
| 733 | {
|
---|
[42621] | 734 | Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
|
---|
| 735 | || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
|
---|
[36768] | 736 |
|
---|
[42621] | 737 | /* Perform the operation exactly (don't use memcpy to avoid
|
---|
| 738 | having to consider how its implementation would affect
|
---|
| 739 | any overlapping source and destination area). */
|
---|
| 740 | OP_TYPE const *puSrcCur = puSrcMem;
|
---|
| 741 | OP_TYPE *puDstCur = puDstMem;
|
---|
| 742 | uint32_t cTodo = cLeftPage;
|
---|
| 743 | while (cTodo-- > 0)
|
---|
| 744 | *puDstCur++ = *puSrcCur++;
|
---|
| 745 |
|
---|
[36768] | 746 | /* Update the registers. */
|
---|
[39958] | 747 | pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
|
---|
| 748 | pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
|
---|
| 749 | pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
|
---|
| 750 |
|
---|
[42193] | 751 | iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
|
---|
| 752 | iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
|
---|
[36768] | 753 | continue;
|
---|
| 754 | }
|
---|
[42193] | 755 | iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
|
---|
[36768] | 756 | }
|
---|
| 757 | }
|
---|
| 758 |
|
---|
| 759 | /*
|
---|
| 760 | * Fallback - slow processing till the end of the current page.
|
---|
| 761 | * In the cross page boundrary case we will end up here with cLeftPage
|
---|
| 762 | * as 0, we execute one loop then.
|
---|
| 763 | */
|
---|
| 764 | do
|
---|
| 765 | {
|
---|
| 766 | OP_TYPE uValue;
|
---|
| 767 | rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
|
---|
| 768 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 769 | return rcStrict;
|
---|
[36768] | 770 | rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
|
---|
| 771 | if (rcStrict != VINF_SUCCESS)
|
---|
[39958] | 772 | return rcStrict;
|
---|
[36768] | 773 |
|
---|
[39958] | 774 | pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
|
---|
| 775 | pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
|
---|
| 776 | pCtx->ADDR_rCX = --uCounterReg;
|
---|
[36768] | 777 | cLeftPage--;
|
---|
| 778 | } while ((int32_t)cLeftPage > 0);
|
---|
| 779 | } while (uCounterReg != 0);
|
---|
| 780 |
|
---|
| 781 | /*
|
---|
[39958] | 782 | * Done.
|
---|
[36768] | 783 | */
|
---|
[39958] | 784 | iemRegAddToRip(pIemCpu, cbInstr);
|
---|
| 785 | return VINF_SUCCESS;
|
---|
[36768] | 786 | }
|
---|
| 787 |
|
---|
| 788 |
|
---|
/**
 * Implements 'REP STOS'.
 *
 * Stores rAX/eAX/AX/AL at ES:rDI, rCX times, advancing rDI by the operand
 * size after each element.  The fast path maps whole guest pages and fills
 * them directly (memset for byte-sized ops); the slow path stores one
 * element at a time and also handles the page-crossing tail.
 *
 * Restartable: rDI and rCX are committed to the guest context as the
 * operation progresses, so a #PF can be delivered and the instruction
 * re-executed to continue where it left off.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        /* Zero count: architectural no-op, just advance RIP. */
        iemRegAddToRip(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* The direction flag decides whether rDI walks up or down thru memory. */
    int8_t const    cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    OP_TYPE const   uValue   = pCtx->OP_rAX;
    ADDR_TYPE       uAddrReg = pCtx->ADDR_rDI;

    /*
     * Be careful with handle bypassing.
     */
    /** @todo Permit doing a page if correctly aligned. */
    if (pIemCpu->fBypassHandlers)
    {
        Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
    }

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
#if ADDR_SIZE != 64
        ADDR2_TYPE  uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
#else
        uint64_t    uVirtAddr = uAddrReg;
#endif
        /* Number of whole elements left before the end of the current page. */
        uint32_t    cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0   /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0      /** @todo Implement reverse direction string ops. */
#if ADDR_SIZE != 64
            && uAddrReg < pCtx->es.u32Limit
            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
#endif
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do a block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Update the regs first so we can loop on cLeftPage. */
                pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
                pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;

                /* Do the memsetting. */
#if OP_SIZE == 8
                memset(puMem, uValue, cLeftPage);
/*#elif OP_SIZE == 32
                ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
#else
                while (cLeftPage-- > 0)
                    *puMem++ = uValue;
#endif

                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundrary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            /* Commit progress element by element so we stay restartable. */
            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            cLeftPage--;
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRip(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
|
---|
| 910 |
|
---|
| 911 |
|
---|
/**
 * Implements 'REP LODS'.
 *
 * Loads the element at iEffSeg:rSI into rAX/eAX/AX/AL, rCX times,
 * advancing rSI by the operand size each iteration.  Because only the
 * last loaded value is architecturally visible, the fast path just reads
 * the final element of each mapped page instead of iterating.
 *
 * Restartable: rSI and rCX are committed as the operation progresses so
 * a #PF can be delivered and the instruction resumed.
 *
 * @param   iEffSeg     The effective source segment (DS unless overridden
 *                      by a segment prefix).
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        /* Zero count: architectural no-op, just advance RIP. */
        iemRegAddToRip(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* The direction flag decides whether rSI walks up or down thru memory. */
    int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
#if ADDR_SIZE != 64
        ADDR2_TYPE uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg;
#else
        uint64_t uVirtAddr = uAddrReg;
#endif
        /* Number of whole elements left before the end of the current page. */
        uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0   /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0      /** @todo Implement reverse direction string ops. */
#if ADDR_SIZE != 64
            && uAddrReg < pSrcHid->u32Limit
            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
#endif
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, we can get away with
             * just reading the last value on the page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Only get the last byte, the rest doesn't matter in direct access mode. */
#if OP_SIZE == 32
                /* 32-bit operand: assign the full 64-bit register so the
                   upper half is zero-extended, as the architecture requires. */
                pCtx->rax = puMem[cLeftPage - 1];
#else
                pCtx->OP_rAX = puMem[cLeftPage - 1];
#endif
                pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
                pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundrary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uTmpValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
#if OP_SIZE == 32
            pCtx->rax = uTmpValue;      /* zero-extends into RAX, see above */
#else
            pCtx->OP_rAX = uTmpValue;
#endif
            pCtx->ADDR_rSI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            cLeftPage--;
        } while ((int32_t)cLeftPage > 0);
        /* NOTE(review): rcStrict is always VINF_SUCCESS here (failures above
           return directly), so this break looks unreachable; kept as-is. */
        if (rcStrict != VINF_SUCCESS)
            break;
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRip(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
|
---|
| 1025 |
|
---|
| 1026 |
|
---|
[36768] | 1027 | #if OP_SIZE != 64
|
---|
| 1028 |
|
---|
| 1029 | /**
|
---|
| 1030 | * Implements 'INS' (no rep)
|
---|
| 1031 | */
|
---|
| 1032 | IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE))
|
---|
| 1033 | {
|
---|
| 1034 | PVM pVM = IEMCPU_TO_VM(pIemCpu);
|
---|
| 1035 | PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
|
---|
| 1036 | VBOXSTRICTRC rcStrict;
|
---|
| 1037 |
|
---|
| 1038 | /*
|
---|
[42761] | 1039 | * Be careful with handle bypassing.
|
---|
| 1040 | */
|
---|
| 1041 | if (pIemCpu->fBypassHandlers)
|
---|
| 1042 | {
|
---|
| 1043 | Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
|
---|
| 1044 | return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
|
---|
| 1045 | }
|
---|
| 1046 |
|
---|
| 1047 | /*
|
---|
[36768] | 1048 | * ASSUMES the #GP for I/O permission is taken first, then any #GP for
|
---|
| 1049 | * segmentation and finally any #PF due to virtual address translation.
|
---|
| 1050 | * ASSUMES nothing is read from the I/O port before traps are taken.
|
---|
| 1051 | */
|
---|
| 1052 | rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
|
---|
| 1053 | if (rcStrict != VINF_SUCCESS)
|
---|
| 1054 | return rcStrict;
|
---|
| 1055 |
|
---|
| 1056 | OP_TYPE *puMem;
|
---|
| 1057 | rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
|
---|
| 1058 | if (rcStrict != VINF_SUCCESS)
|
---|
| 1059 | return rcStrict;
|
---|
| 1060 |
|
---|
| 1061 | uint32_t u32Value;
|
---|
[36849] | 1062 | if (!IEM_VERIFICATION_ENABLED(pIemCpu))
|
---|
[36829] | 1063 | rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8);
|
---|
| 1064 | else
|
---|
| 1065 | rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
|
---|
[36768] | 1066 | if (IOM_SUCCESS(rcStrict))
|
---|
| 1067 | {
|
---|
| 1068 | VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
|
---|
| 1069 | if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
|
---|
| 1070 | {
|
---|
| 1071 | if (!pCtx->eflags.Bits.u1DF)
|
---|
| 1072 | pCtx->ADDR_rDI += OP_SIZE / 8;
|
---|
| 1073 | else
|
---|
| 1074 | pCtx->ADDR_rDI -= OP_SIZE / 8;
|
---|
| 1075 | iemRegAddToRip(pIemCpu, cbInstr);
|
---|
| 1076 | }
|
---|
| 1077 | /* iemMemMap already check permissions, so this may only be real errors
|
---|
| 1078 | or access handlers medling. The access handler case is going to
|
---|
| 1079 | cause misbehavior if the instruction is re-interpreted or smth. So,
|
---|
| 1080 | we fail with an internal error here instead. */
|
---|
| 1081 | else
|
---|
[39958] | 1082 | AssertLogRelFailedReturn(VERR_IEM_IPE_1);
|
---|
[36768] | 1083 | }
|
---|
| 1084 | return rcStrict;
|
---|
| 1085 | }
|
---|
| 1086 |
|
---|
[36794] | 1087 |
|
---|
/**
 * Implements 'REP INS'.
 *
 * Reads rCX elements from the I/O port in DX, storing each at ES:rDI and
 * advancing rDI per EFLAGS.DF.  The fast path maps the destination page
 * and loops port reads directly into it; the slow path goes thru
 * iemMemMap for unaligned/page-crossing tails.
 *
 * Restartable: rDI and rCX are committed after each successful port read,
 * so informational I/O statuses and faults can interrupt and later resume
 * the instruction.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
{
    PVM         pVM  = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX    pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    uint16_t const u16Port = pCtx->dx;
    VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        /* Zero count: architectural no-op, just advance RIP. */
        iemRegAddToRip(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* The direction flag decides whether rDI walks up or down thru memory. */
    int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;

    /*
     * Be careful with handle bypassing.
     */
    if (pIemCpu->fBypassHandlers)
    {
        Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
    }

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
#if ADDR_SIZE != 64
        ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
#else
        uint64_t uVirtAddr = uAddrReg;
#endif
        /* Number of whole elements left before the end of the current page. */
        uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0   /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0      /** @todo Implement reverse direction string ops. */
#if ADDR_SIZE != 64
            && uAddrReg < pCtx->es.u32Limit
            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
#endif
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, we would've liked to use
             * an string I/O method to do the work, but the current IOM
             * interface doesn't match our current approach. So, do a regular
             * loop instead.
             */
            /** @todo Change the I/O manager interface to make use of
             *        mapped buffers instead of leaving those bits to the
             *        device implementation? */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                uint32_t off = 0;
                while (off < cLeftPage)
                {
                    uint32_t u32Value;
                    if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                        rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
                    else
                        rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
                    if (IOM_SUCCESS(rcStrict))
                    {
                        /* Store the element and commit the register updates
                           before possibly bailing out, so we stay restartable. */
                        puMem[off] = (OP_TYPE)u32Value;
                        pCtx->ADDR_rDI = uAddrReg += cbIncr;
                        pCtx->ADDR_rCX = --uCounterReg;
                    }
                    if (rcStrict != VINF_SUCCESS)
                    {
                        /* Informational I/O statuses are passed up; the work
                           done so far is already committed to the context. */
                        if (IOM_SUCCESS(rcStrict))
                            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                        if (uCounterReg == 0)
                            iemRegAddToRip(pIemCpu, cbInstr);
                        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
                        return rcStrict;
                    }
                    off++;
                }
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundrary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         *
         * Note! We ASSUME the CPU will raise #PF or #GP before access the
         *       I/O port, otherwise it wouldn't really be restartable.
         */
        /** @todo investigate what the CPU actually does with \#PF/\#GP
         *        during INS. */
        do
        {
            OP_TYPE *puMem;
            rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            uint32_t u32Value;
            if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
            else
                rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
            if (!IOM_SUCCESS(rcStrict))
                return rcStrict;

            *puMem = (OP_TYPE)u32Value;
            VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
            AssertLogRelReturn(rcStrict2 == VINF_SUCCESS, VERR_IEM_IPE_1); /* See non-rep version. */

            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;

            cLeftPage--;
            if (rcStrict != VINF_SUCCESS)
            {
                /* Informational I/O status: pass it up after committing. */
                if (IOM_SUCCESS(rcStrict))
                    rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                if (uCounterReg == 0)
                    iemRegAddToRip(pIemCpu, cbInstr);
                return rcStrict;
            }
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRip(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
|
---|
| 1257 |
|
---|
| 1258 |
|
---|
/**
 * Implements 'OUTS' (no rep)
 *
 * Fetches one element from iEffSeg:rSI and writes it to the I/O port in
 * DX, then advances rSI according to EFLAGS.DF.  Informational I/O
 * statuses are passed up after RIP has been advanced.
 *
 * @param   iEffSeg     The effective source segment (DS unless overridden
 *                      by a segment prefix).
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
{
    PVM             pVM  = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX        pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC    rcStrict;

    /*
     * ASSUMES the #GP for I/O permission is taken first, then any #GP for
     * segmentation and finally any #PF due to virtual address translation.
     * ASSUMES nothing is read from the I/O port before traps are taken.
     */
    rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Fetch the source element before touching the port, see ASSUMES above. */
    OP_TYPE uValue;
    rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
    if (rcStrict == VINF_SUCCESS)
    {
        if (!IEM_VERIFICATION_ENABLED(pIemCpu))
            rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8);
        else
            rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
        if (IOM_SUCCESS(rcStrict))
        {
            if (!pCtx->eflags.Bits.u1DF)
                pCtx->ADDR_rSI += OP_SIZE / 8;
            else
                pCtx->ADDR_rSI -= OP_SIZE / 8;
            iemRegAddToRip(pIemCpu, cbInstr);
            /* Propagate informational statuses (instruction is complete). */
            if (rcStrict != VINF_SUCCESS)
                rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
        }
    }
    return rcStrict;
}
|
---|
| 1298 |
|
---|
[36794] | 1299 |
|
---|
/**
 * Implements 'REP OUTS'.
 *
 * Writes rCX elements fetched from iEffSeg:rSI to the I/O port in DX,
 * advancing rSI per EFLAGS.DF.  The fast path maps the source page and
 * loops port writes directly from it; the slow path fetches one element
 * at a time for unaligned/page-crossing tails.
 *
 * Restartable: rSI and rCX are committed after each successful port
 * write, so informational I/O statuses and faults can interrupt and
 * later resume the instruction.
 *
 * @param   iEffSeg     The effective source segment (DS unless overridden
 *                      by a segment prefix).
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
{
    PVM         pVM  = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX    pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    uint16_t const u16Port = pCtx->dx;
    VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        /* Zero count: architectural no-op, just advance RIP. */
        iemRegAddToRip(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* The direction flag decides whether rSI walks up or down thru memory. */
    int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
#if ADDR_SIZE != 64
        ADDR2_TYPE uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg;
#else
        uint64_t uVirtAddr = uAddrReg;
#endif
        /* Number of whole elements left before the end of the current page. */
        uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0   /* can be null if unaligned, do one fallback round. */
            && cbIncr > 0      /** @todo Implement reverse direction string ops. */
#if ADDR_SIZE != 64
            && uAddrReg < pHid->u32Limit
            && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit
#endif
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, we would've liked to use
             * an string I/O method to do the work, but the current IOM
             * interface doesn't match our current approach. So, do a regular
             * loop instead.
             */
            /** @todo Change the I/O manager interface to make use of
             *        mapped buffers instead of leaving those bits to the
             *        device implementation? */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                uint32_t off = 0;
                while (off < cLeftPage)
                {
                    uint32_t u32Value = *puMem++;
                    if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                        rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8);
                    else
                        rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
                    if (IOM_SUCCESS(rcStrict))
                    {
                        /* Commit the register updates before possibly bailing
                           out below, so we stay restartable. */
                        pCtx->ADDR_rSI = uAddrReg += cbIncr;
                        pCtx->ADDR_rCX = --uCounterReg;
                    }
                    if (rcStrict != VINF_SUCCESS)
                    {
                        /* Informational I/O statuses are passed up; the work
                           done so far is already committed to the context. */
                        if (IOM_SUCCESS(rcStrict))
                            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                        if (uCounterReg == 0)
                            iemRegAddToRip(pIemCpu, cbInstr);
                        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
                        return rcStrict;
                    }
                    off++;
                }
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundrary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         *
         * Note! We ASSUME the CPU will raise #PF or #GP before access the
         *       I/O port, otherwise it wouldn't really be restartable.
         */
        /** @todo investigate what the CPU actually does with \#PF/\#GP
         *        during INS. */
        do
        {
            OP_TYPE uValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8);
            else
                rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
            if (IOM_SUCCESS(rcStrict))
            {
                pCtx->ADDR_rSI = uAddrReg += cbIncr;
                pCtx->ADDR_rCX = --uCounterReg;
                cLeftPage--;
            }
            if (rcStrict != VINF_SUCCESS)
            {
                /* Informational I/O status: pass it up after committing. */
                if (IOM_SUCCESS(rcStrict))
                    rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                if (uCounterReg == 0)
                    iemRegAddToRip(pIemCpu, cbInstr);
                return rcStrict;
            }
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRip(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
|
---|
| 1454 |
|
---|
| 1455 | #endif /* OP_SIZE != 64-bit */
|
---|
| 1456 |
|
---|
| 1457 |
|
---|
/* Clean up the per-instantiation template macros so the file can be
   re-included with different OP_SIZE/ADDR_SIZE values.
   Fix: OP_TYPE was never undefined (relied on identical redefinition),
   and ADDR2_TYPE was listed twice; dedupe and undef everything defined
   by the header section. */
#undef OP_rAX
#undef OP_SIZE
#undef OP_TYPE
#undef ADDR_SIZE
#undef ADDR_rDI
#undef ADDR_rSI
#undef ADDR_rCX
#undef ADDR_rIP
#undef ADDR_TYPE
#undef ADDR2_TYPE
|
---|
| 1468 |
|
---|