- Timestamp:
- Jul 25, 2013 6:05:08 PM (11 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
-
- 5 edited
-
VMMAll/IEMAll.cpp (modified) (2 diffs)
-
VMMAll/IEMAllAImpl.asm (modified) (1 diff)
-
VMMAll/IEMAllInstructions.cpp.h (modified) (1 diff)
-
include/IEMInternal.h (modified) (1 diff)
-
testcase/tstIEMCheckMc.cpp (modified) (3 diffs)
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r47394 r47399 7535 7535 } while (0) 7536 7536 7537 /** 7538 * Calls a MMX assembly implementation taking three visible arguments. 7539 * 7540 * @param a_pfnAImpl Pointer to the assembly MMX routine. 7541 * @param a0 The first extra argument. 7542 * @param a1 The second extra argument. 7543 * @param a2 The third extra argument. 7544 */ 7545 #define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \ 7546 do { \ 7547 iemFpuPrepareUsage(pIemCpu); \ 7548 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \ 7549 } while (0) 7550 7537 7551 7538 7552 /** … … 7547 7561 iemFpuPrepareUsageSse(pIemCpu); \ 7548 7562 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \ 7563 } while (0) 7564 7565 /** 7566 * Calls a SSE assembly implementation taking three visible arguments. 7567 * 7568 * @param a_pfnAImpl Pointer to the assembly SSE routine. 7569 * @param a0 The first extra argument. 7570 * @param a1 The second extra argument. 7571 * @param a2 The third extra argument. 7572 */ 7573 #define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \ 7574 do { \ 7575 iemFpuPrepareUsageSse(pIemCpu); \ 7576 a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1), (a2)); \ 7577 } while (0) 7550 7578 -
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm
r47385 r47399 2906 2906 IEMIMPL_MEDIA_F1L1 punpckhqdq, 0 2907 2907 2908 2909 ; 2910 ; Shufflers with evil 8-bit immediates. 2911 ; 2912 2913 BEGINPROC_FASTCALL iemAImpl_pshufw, 16 2914 PROLOGUE_4_ARGS 2915 IEMIMPL_MMX_PROLOGUE 2916 2917 movq mm0, [A1] 2918 movq mm1, [A2] 2919 lea T0, [A3 + A3*4] ; sizeof(pshufw+ret) == 5 2920 lea T1, [.imm0 xWrtRIP] 2921 lea T1, [T1 + T0] 2922 call T1 2923 movq [A1], mm0 2924 2925 IEMIMPL_MMX_EPILOGUE 2926 EPILOGUE_4_ARGS 2927 %assign bImm 0 2928 %rep 256 2929 .imm %+ bImm: 2930 pshufw mm0, mm1, bImm 2931 ret 2932 %assign bImm bImm + 1 2933 %endrep 2934 .immEnd: ; 256*5 == 0x500 2935 dw 0xfaff + (.immEnd - .imm0) ; will cause warning if entries are too big. 2936 dw 0x104ff - (.immEnd - .imm0) ; will cause warning if entries are too small. 2937 ENDPROC iemAImpl_pshufw 2938 2939 2940 %macro IEMIMPL_MEDIA_SSE_PSHUFXX 1 2941 BEGINPROC_FASTCALL iemAImpl_ %+ %1, 16 2942 PROLOGUE_4_ARGS 2943 IEMIMPL_SSE_PROLOGUE 2944 2945 movdqu xmm0, [A1] 2946 movdqu xmm1, [A2] 2947 lea T1, [.imm0 xWrtRIP] 2948 lea T0, [A3 + A3*2] ; sizeof(pshufXX+ret) == 6: (A3 * 3) *2 2949 lea T1, [T1 + T0*2] 2950 call T1 2951 movdqu [A1], xmm0 2952 2953 IEMIMPL_SSE_EPILOGUE 2954 EPILOGUE_4_ARGS 2955 %assign bImm 0 2956 %rep 256 2957 .imm %+ bImm: 2958 %1 xmm0, xmm1, bImm 2959 ret 2960 %assign bImm bImm + 1 2961 %endrep 2962 .immEnd: ; 256*6 == 0x600 2963 dw 0xf9ff + (.immEnd - .imm0) ; will cause warning if entries are too big. 2964 dw 0x105ff - (.immEnd - .imm0) ; will cause warning if entries are too small. 2965 ENDPROC iemAImpl_ %+ %1 2966 %endmacro 2967 2968 IEMIMPL_MEDIA_SSE_PSHUFXX pshufhw 2969 IEMIMPL_MEDIA_SSE_PSHUFXX pshuflw 2970 IEMIMPL_MEDIA_SSE_PSHUFXX pshufd -
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
r47394 r47399 2314 2314 } 2315 2315 2316 /** Opcode 0x0f 0x70. */ 2317 FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib); // NEXT 2316 2317 /** Opcode 0x0f 0x70. The immediate here is evil! */ 2318 FNIEMOP_DEF(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib) 2319 { 2320 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 2321 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 2322 { 2323 case IEM_OP_PRF_SIZE_OP: /* SSE */ 2324 case IEM_OP_PRF_REPNZ: /* SSE */ 2325 case IEM_OP_PRF_REPZ: /* SSE */ 2326 { 2327 PFNIEMAIMPLMEDIAPSHUF pfnAImpl; 2328 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)) 2329 { 2330 case IEM_OP_PRF_SIZE_OP: 2331 IEMOP_MNEMONIC("pshufd Vdq,Wdq,Ib"); 2332 pfnAImpl = iemAImpl_pshufd; 2333 break; 2334 case IEM_OP_PRF_REPNZ: 2335 IEMOP_MNEMONIC("pshuflw Vdq,Wdq,Ib"); 2336 pfnAImpl = iemAImpl_pshuflw; 2337 break; 2338 case IEM_OP_PRF_REPZ: 2339 IEMOP_MNEMONIC("pshufhw Vdq,Wdq,Ib"); 2340 pfnAImpl = iemAImpl_pshufhw; 2341 break; 2342 } 2343 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 2344 { 2345 /* 2346 * Register, register. 2347 */ 2348 uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil); 2349 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2350 2351 IEM_MC_BEGIN(3, 0); 2352 IEM_MC_ARG(uint128_t *, pDst, 0); 2353 IEM_MC_ARG(uint128_t const *, pSrc, 1); 2354 IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2); 2355 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 2356 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); 2357 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); 2358 IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg); 2359 IEM_MC_ADVANCE_RIP(); 2360 IEM_MC_END(); 2361 } 2362 else 2363 { 2364 /* 2365 * Register, memory. 
2366 */ 2367 IEM_MC_BEGIN(3, 2); 2368 IEM_MC_ARG(uint128_t *, pDst, 0); 2369 IEM_MC_LOCAL(uint128_t, uSrc); 2370 IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1); 2371 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 2372 2373 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 2374 uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil); 2375 IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2); 2376 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2377 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); 2378 2379 IEM_MC_FETCH_MEM_U128_ALIGN(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); 2380 IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); 2381 IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg); 2382 2383 IEM_MC_ADVANCE_RIP(); 2384 IEM_MC_END(); 2385 } 2386 return VINF_SUCCESS; 2387 } 2388 2389 case 0: /* MMX Extension */ 2390 IEMOP_MNEMONIC("pshufw Pq,Qq,Ib"); 2391 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 2392 { 2393 /* 2394 * Register, register. 2395 */ 2396 uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil); 2397 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2398 2399 IEM_MC_BEGIN(3, 0); 2400 IEM_MC_ARG(uint64_t *, pDst, 0); 2401 IEM_MC_ARG(uint64_t const *, pSrc, 1); 2402 IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2); 2403 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); 2404 IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); 2405 IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK); 2406 IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg); 2407 IEM_MC_ADVANCE_RIP(); 2408 IEM_MC_END(); 2409 } 2410 else 2411 { 2412 /* 2413 * Register, memory. 
2414 */ 2415 IEM_MC_BEGIN(3, 2); 2416 IEM_MC_ARG(uint64_t *, pDst, 0); 2417 IEM_MC_LOCAL(uint64_t, uSrc); 2418 IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1); 2419 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 2420 2421 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 2422 uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil); 2423 IEM_MC_ARG_CONST(uint8_t, bEvilArg, /*=*/ bEvil, 2); 2424 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 2425 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT(); 2426 2427 IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); 2428 IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK); 2429 IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg); 2430 2431 IEM_MC_ADVANCE_RIP(); 2432 IEM_MC_END(); 2433 } 2434 return VINF_SUCCESS; 2435 2436 default: 2437 return IEMOP_RAISE_INVALID_OPCODE(); 2438 } 2439 } 2440 2318 2441 2319 2442 /** Opcode 0x0f 0x71 11/2. */ -
trunk/src/VBox/VMM/include/IEMInternal.h
r47385 r47399 1096 1096 FNIEMAIMPLMEDIAF1H1U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64; 1097 1097 FNIEMAIMPLMEDIAF1H1U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128; 1098 /** @} */ 1098 /** @} */ 1099 1100 /** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil) 1101 * @{ */ 1102 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUF,(PCX86FXSTATE pFpuState, uint128_t *pu128Dst, 1103 uint128_t const *pu128Src, uint8_t bEvil)); 1104 typedef FNIEMAIMPLMEDIAPSHUF *PFNIEMAIMPLMEDIAPSHUF; 1105 FNIEMAIMPLMEDIAPSHUF iemAImpl_pshufhw, iemAImpl_pshuflw, iemAImpl_pshufd; 1106 IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src, uint8_t bEvil)); 1107 /** @} */ 1108 1099 1109 1100 1110 -
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r47395 r47399 64 64 uint8_t iMySeg = (a_iSeg); NOREF(iMySeg); /** @todo const or variable. grr. */ \ 65 65 } while (0) 66 67 #define CHK_CALL_ARG(a_Name, a_iArg) \ 68 do { RT_CONCAT3(iArgCheck_,a_iArg,a_Name) = 1; } while (0) 66 69 67 70 … … 266 269 #define iemCImpl_callf NULL 267 270 #define iemCImpl_FarJmp NULL 271 272 #define iemAImpl_pshufhw NULL 273 #define iemAImpl_pshuflw NULL 274 #define iemAImpl_pshufd NULL 268 275 269 276 /** @} */ … … 590 597 #define IEM_MC_USED_FPU() do { } while (0) 591 598 592 #define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) do { } while (0) 593 #define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) do { } while (0) 599 #define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \ 600 do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); } while (0) 601 #define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \ 602 do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2);} while (0) 603 #define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \ 604 do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); } while (0) 605 #define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \ 606 do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2);} while (0) 594 607 595 608 #define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (g_fRandom) {
Note:
See TracChangeset
for help on using the changeset viewer.

