Timestamp:
    May 18, 2017 2:24:43 PM (7 years ago)
Location:
    trunk
Files:
    9 edited

    include/VBox/disopcode.h (modified) (2 diffs)
    src/VBox/VMM/VMMAll/IEMAll.cpp (modified) (14 diffs)
    src/VBox/VMM/VMMAll/IEMAllAImplC.cpp (modified) (1 diff)
    src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py (modified) (3 diffs)
    src/VBox/VMM/VMMAll/IEMAllInstructionsVexMap1.cpp.h (modified) (1 diff)
    src/VBox/VMM/include/IEMInternal.h (modified) (1 diff)
    src/VBox/VMM/testcase/tstIEMCheckMc.cpp (modified) (5 diffs)
    src/VBox/ValidationKit/bootsectors/bs3-cpu-generated-1-template.c (modified) (27 diffs)
    src/VBox/ValidationKit/bootsectors/bs3-cpu-generated-1.h (modified) (4 diffs)
trunk/include/VBox/disopcode.h
r66937 → r66950

@@ -783 +783 @@
     OP_VMOVLPS,
     OP_VMOVLPD,
+    OP_VMOVSLDUP,
     /** @} */
     OP_END_OF_OPCODES

@@ -1101 +1102 @@
 #define OP_PARM_Vss_WO  OP_PARM_Vss  /**< Annotates write only operand. */
 #define OP_PARM_Vsd_WO  OP_PARM_Vsd  /**< Annotates write only operand. */
+#define OP_PARM_Vx_WO   OP_PARM_Vx   /**< Annotates write only operand. */
 #define OP_PARM_Wpd_WO  OP_PARM_Wpd  /**< Annotates write only operand. */
 #define OP_PARM_Wps_WO  OP_PARM_Wps  /**< Annotates write only operand. */
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r66932 → r66950

@@ -11527 +11527 @@
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \

@@ -11536 +11536 @@
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \

@@ -11545 +11545 @@
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \

@@ -11554 +11554 @@
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
-         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
+    } while (0)
+
+#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
+    (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
+#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
+    (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
+#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
+    (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
+#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
+    do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
+         uintptr_t const iYRegTmp = (a_iYReg); \
+         pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
+         pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
     } while (0)

@@ -11565 +11579 @@
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
-         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \

@@ -11575 +11589 @@
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)

@@ -11588 +11602 @@
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \

@@ -11599 +11613 @@
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \

@@ -11610 +11624 @@
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \

@@ -11620 +11634 @@
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
          pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+         IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)

@@ -11727 +11741 @@
 # define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
-# define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
 #else
 # define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
     iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
-# define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
     iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
 #endif

@@ -12296 +12310 @@
  * Calls a SSE assembly implementation taking two visible arguments.
  *
- * @param a_pfnAImpl Pointer to the assembly MMX routine.
+ * @param a_pfnAImpl Pointer to the assembly SSE routine.
  * @param a0 The first extra argument.
  * @param a1 The second extra argument.

@@ -12309 +12323 @@
  * Calls a SSE assembly implementation taking three visible arguments.
  *
- * @param a_pfnAImpl Pointer to the assembly MMX routine.
+ * @param a_pfnAImpl Pointer to the assembly SSE routine.
  * @param a0 The first extra argument.
  * @param a1 The second extra argument.

@@ -12318 +12332 @@
         IEM_MC_PREPARE_SSE_USAGE(); \
         a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
+    } while (0)
+
+
+/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
+ *  IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
+#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
+    IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
+
+/**
+ * Calls a AVX assembly implementation taking two visible arguments.
+ *
+ * There is one implicit zero'th argument, a pointer to the extended state.
+ *
+ * @param a_pfnAImpl Pointer to the assembly AVX routine.
+ * @param a1 The first extra argument.
+ * @param a2 The second extra argument.
+ */
+#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
+    do { \
+        IEM_MC_PREPARE_AVX_USAGE(); \
+        a_pfnAImpl(pXState, (a1), (a2)); \
+    } while (0)
+
+/**
+ * Calls a AVX assembly implementation taking three visible arguments.
+ *
+ * There is one implicit zero'th argument, a pointer to the extended state.
+ *
+ * @param a_pfnAImpl Pointer to the assembly AVX routine.
+ * @param a1 The first extra argument.
+ * @param a2 The second extra argument.
+ * @param a3 The third extra argument.
+ */
+#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
+    do { \
+        IEM_MC_PREPARE_AVX_USAGE(); \
+        a_pfnAImpl(pXState, (a1), (a2), (a3)); \
     } while (0)
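The recurring one-line change above (a_iYRegDst → iYRegDstTmp in the IEM_MC_INT_CLEAR_ZMM_256_UP calls) makes each macro body use the local that already caches the evaluated argument, so the a_iYRegDst expression expands exactly once. A minimal standalone sketch of that do/while(0) single-evaluation pattern; the names (CLEAR_HI_BUGGY, CLEAR_HI_FIXED, NextReg, g_aYmmHi) are illustrative only and not part of the VirtualBox sources:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t au64[2]; } YMMHIPART;

    static YMMHIPART g_aYmmHi[16];
    static int       g_cEvals;

    /* Argument expression with a visible side effect: each evaluation bumps g_cEvals. */
    static unsigned NextReg(void) { return (unsigned)(g_cEvals++ & 15); }

    /* Buggy shape: the parameter expands twice, so the argument expression runs twice. */
    #define CLEAR_HI_BUGGY(a_iYReg) \
        do { \
            g_aYmmHi[(a_iYReg)].au64[0] = 0; \
            g_aYmmHi[(a_iYReg)].au64[1] = 0; \
        } while (0)

    /* Fixed shape, as in the changeset: cache the argument once in a local. */
    #define CLEAR_HI_FIXED(a_iYReg) \
        do { \
            uintptr_t const iYRegTmp = (a_iYReg); \
            g_aYmmHi[iYRegTmp].au64[0] = 0; \
            g_aYmmHi[iYRegTmp].au64[1] = 0; \
        } while (0)

    int main(void)
    {
        g_cEvals = 0;
        CLEAR_HI_BUGGY(NextReg());          /* NextReg() runs twice here */
        printf("buggy: %d evaluation(s)\n", g_cEvals);

        g_cEvals = 0;
        CLEAR_HI_FIXED(NextReg());          /* NextReg() runs exactly once */
        printf("fixed: %d evaluation(s)\n", g_cEvals);
        return 0;
    }

Compiled with any C99 compiler, the buggy variant reports two evaluations and the fixed one reports one, and the buggy variant can end up touching two different registers, which is why the changeset funnels every use through the cached temp.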
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp
r66789 → r66950

@@ -1382 +1382 @@
 
 
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc))
+{
+    pXState->x87.aXMM[iYRegDst].au32[0]       = pXState->x87.aXMM[iYRegSrc].au32[0];
+    pXState->x87.aXMM[iYRegDst].au32[1]       = pXState->x87.aXMM[iYRegSrc].au32[0];
+    pXState->x87.aXMM[iYRegDst].au32[2]       = pXState->x87.aXMM[iYRegSrc].au32[2];
+    pXState->x87.aXMM[iYRegDst].au32[3]       = pXState->x87.aXMM[iYRegSrc].au32[2];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[0] = pXState->u.YmmHi.aYmmHi[iYRegSrc].au32[0];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[1] = pXState->u.YmmHi.aYmmHi[iYRegSrc].au32[0];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[2] = pXState->u.YmmHi.aYmmHi[iYRegSrc].au32[2];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[3] = pXState->u.YmmHi.aYmmHi[iYRegSrc].au32[2];
+}
+
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc))
+{
+    pXState->x87.aXMM[iYRegDst].au32[0]       = pSrc->au32[0];
+    pXState->x87.aXMM[iYRegDst].au32[1]       = pSrc->au32[0];
+    pXState->x87.aXMM[iYRegDst].au32[2]       = pSrc->au32[2];
+    pXState->x87.aXMM[iYRegDst].au32[3]       = pSrc->au32[2];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[0] = pSrc->au32[4];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[1] = pSrc->au32[4];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[2] = pSrc->au32[6];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[3] = pSrc->au32[6];
+}
+
+
 IEM_DECL_IMPL_DEF(void, iemAImpl_movshdup,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc))
 {
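For reference, (v)movsldup duplicates the even-indexed dword of each 64-bit lane into the adjacent odd slot, which is all the two helpers above do across the XMM and YmmHi halves. A self-contained sketch of the same semantics on a plain dword array (movsldup256 is an illustrative name, not a VirtualBox symbol); the test vector matches the @optest values used for the new instruction:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* dst[2i] = dst[2i+1] = src[2i] for each of the four 64-bit lanes. */
    static void movsldup256(uint32_t aDst[8], const uint32_t aSrc[8])
    {
        for (unsigned i = 0; i < 8; i += 2)
        {
            aDst[i]     = aSrc[i];
            aDst[i + 1] = aSrc[i];
        }
    }

    int main(void)
    {
        /* Little-endian dwords of
           op2=0xbbbbbbbb00000004cccccccc00000003dddddddd00000002eeeeeeee00000001 */
        uint32_t const aSrc[8] = { 1, 0xeeeeeeee, 2, 0xdddddddd, 3, 0xcccccccc, 4, 0xbbbbbbbb };
        uint32_t       aDst[8];
        movsldup256(aDst, aSrc);
        /* Expected:
           op1=0x0000000400000004000000030000000300000002000000020000000100000001 */
        for (unsigned i = 0; i < 8; i += 2)
            assert(aDst[i] == aSrc[i] && aDst[i + 1] == aSrc[i]);
        printf("vmovsldup 256-bit semantics check passed\n");
        return 0;
    }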
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py
r66937 → r66950

@@ -228 +228 @@
     'Wq':         ( 'IDX_UseModRM', 'rm', '%Wq', 'Wq', ),
     'WqZxReg_WO':  ( 'IDX_UseModRM', 'rm', '%Wq', 'Wq', ),
+    'Wx':          ( 'IDX_UseModRM', 'rm', '%Wx', 'Wx', ),
 
     # ModR/M.rm - register only.

@@ -271 +272 @@
     'VqHi_WO':  ( 'IDX_UseModRM', 'reg', '%Vdq', 'VdqHi', ),
     'VqZx_WO':  ( 'IDX_UseModRM', 'reg', '%Vq', 'VqZx', ),
+    'Vx_WO':    ( 'IDX_UseModRM', 'reg', '%Vx', 'Vx', ),
 
     # VEX.vvvv

@@ -1145 +1147 @@
         'o64': 'size_o64',
     },
+    # VEX.L value.
+    'vex.l': {
+        '0': 'vexl_0',
+        '1': 'vexl_1',
+    },
     # Execution ring.
     'ring': {
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsVexMap1.cpp.h
r66937 → r66950

@@ -740 +740 @@
 
 
-//FNIEMOP_DEF(iemOp_vmovlpd_Vq_Hq_Mq)
-//{
-//    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
-//    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
-//    {
-//        IEMOP_MNEMONIC2(RM_MEM, VMOVLPD, vmovlpd, Vq, Mq, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
-//
-//        IEM_MC_BEGIN(0, 2);
-//        IEM_MC_LOCAL(uint64_t, uSrc);
-//        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
-//
-//        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
-//        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-//        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
-//        IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
-//
-//        IEM_MC_FETCH_MEM_U64(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
-//        IEM_MC_STORE_XREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, uSrc);
-//
-//        IEM_MC_ADVANCE_RIP();
-//        IEM_MC_END();
-//        return VINF_SUCCESS;
-//    }
-//
-//    /**
-//     * @ opdone
-//     * @ opmnemonic ud660f12m3
-//     * @ opcode 0x12
-//     * @ opcodesub 11 mr/reg
-//     * @ oppfx 0x66
-//     * @ opunused immediate
-//     * @ opcpuid sse
-//     * @ optest ->
-//     */
-//    return IEMOP_RAISE_INVALID_OPCODE();
-//}
-
-
 /**
- * @ opcode 0x12
- * @ oppfx 0xf3
- * @ opcpuid sse3
- * @ opgroup og_sse3_pcksclr_datamove
- * @ opxcpttype 4
- * @ optest op1=-1 op2=0xdddddddd00000002eeeeeeee00000001 ->
- *          op1=0x00000002000000020000000100000001
+ * @opcode 0x12
+ * @oppfx 0xf3
+ * @opcpuid avx
+ * @opgroup og_avx_pcksclr_datamove
+ * @opxcpttype 4
+ * @optest vex.l==0 / op1=-1 op2=0xdddddddd00000002eeeeeeee00000001
+ *         -> op1=0x00000002000000020000000100000001
+ * @optest vex.l==1 /
+ *         op2=0xbbbbbbbb00000004cccccccc00000003dddddddd00000002eeeeeeee00000001
+ *         -> op1=0x0000000400000004000000030000000300000002000000020000000100000001
+ * @oponly
  */
-FNIEMOP_STUB(iemOp_vmovsldup_Vx_Wx);
-//FNIEMOP_DEF(iemOp_vmovsldup_Vx_Wx)
-//{
-//    IEMOP_MNEMONIC2(RM, VMOVSLDUP, vmovsldup, Vdq, Wdq, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
-//    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
-//    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
-//    {
-//        /*
-//         * Register, register.
-//         */
-//        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-//        IEM_MC_BEGIN(2, 0);
-//        IEM_MC_ARG(PRTUINT128U, puDst, 0);
-//        IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
-//
-//        IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT();
-//        IEM_MC_PREPARE_SSE_USAGE();
-//
-//        IEM_MC_REF_XREG_U128_CONST(puSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
-//        IEM_MC_REF_XREG_U128(puDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
-//        IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_movsldup, puDst, puSrc);
-//
-//        IEM_MC_ADVANCE_RIP();
-//        IEM_MC_END();
-//    }
-//    else
-//    {
-//        /*
-//         * Register, memory.
-//         */
-//        IEM_MC_BEGIN(2, 2);
-//        IEM_MC_LOCAL(RTUINT128U, uSrc);
-//        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
-//        IEM_MC_ARG(PRTUINT128U, puDst, 0);
-//        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
-//
-//        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
-//        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-//        IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT();
-//        IEM_MC_PREPARE_SSE_USAGE();
-//
-//        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
-//        IEM_MC_REF_XREG_U128(puDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
-//        IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_movsldup, puDst, puSrc);
-//
-//        IEM_MC_ADVANCE_RIP();
-//        IEM_MC_END();
-//    }
-//    return VINF_SUCCESS;
-//}
+FNIEMOP_DEF(iemOp_vmovsldup_Vx_Wx)
+{
+    IEMOP_MNEMONIC2(VEX_RM, VMOVSLDUP, vmovsldup, Vx_WO, Wx, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
+    Assert(pVCpu->iem.s.uVexLength <= 1);
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV();
+        if (pVCpu->iem.s.uVexLength == 0)
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG(PRTUINT128U,  puDst, 0);
+            IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
+
+            IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+            IEM_MC_PREPARE_AVX_USAGE();
+
+            IEM_MC_REF_XREG_U128_CONST(puSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
+            IEM_MC_REF_XREG_U128(puDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_movsldup, puDst, puSrc);
+            IEM_MC_CLEAR_YREG_128_UP(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+
+            IEM_MC_ADVANCE_RIP();
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
+            IEM_MC_ARG_CONST(uint8_t, iYRegDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 1);
+            IEM_MC_ARG_CONST(uint8_t, iYRegSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 2);
+
+            IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+            IEM_MC_PREPARE_AVX_USAGE();
+            IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vmovsldup_256_rr, iYRegDst, iYRegSrc);
+
+            IEM_MC_ADVANCE_RIP();
+            IEM_MC_END();
+        }
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        if (pVCpu->iem.s.uVexLength == 0)
+        {
+            IEM_MC_BEGIN(2, 2);
+            IEM_MC_LOCAL(RTUINT128U, uSrc);
+            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+            IEM_MC_ARG(PRTUINT128U, puDst, 0);
+            IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
+
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+            IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV();
+            IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+            IEM_MC_PREPARE_AVX_USAGE();
+
+            IEM_MC_FETCH_MEM_U128(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+            IEM_MC_REF_XREG_U128(puDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_movsldup, puDst, puSrc);
+            IEM_MC_CLEAR_YREG_128_UP(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+
+            IEM_MC_ADVANCE_RIP();
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(3, 2);
+            IEM_MC_LOCAL(RTUINT256U, uSrc);
+            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+            IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
+            IEM_MC_ARG_CONST(uint8_t, iYRegDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 1);
+            IEM_MC_ARG_LOCAL_REF(PCRTUINT256U, puSrc, uSrc, 2);
+
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+            IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV();
+            IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+            IEM_MC_PREPARE_AVX_USAGE();
+
+            IEM_MC_FETCH_MEM_U256(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+            IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vmovsldup_256_rm, iYRegDst, puSrc);
+
+            IEM_MC_ADVANCE_RIP();
+            IEM_MC_END();
+        }
+    }
+    return VINF_SUCCESS;
+}
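Note the IEM_MC_CLEAR_YREG_128_UP calls in both VEX.L==0 paths above: a VEX-encoded 128-bit operation zeroes bits 255:128 of the destination, whereas the legacy SSE encoding leaves them untouched, so the 128-bit SSE worker iemAImpl_movsldup can only be reused if the upper lane is cleared afterwards. A small sketch of the difference (ymm_t and the write_xmm_* helpers are illustrative names, not VirtualBox types):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* One 256-bit register modelled as four 64-bit lanes. */
    typedef struct { uint64_t au64[4]; } ymm_t;

    /* Legacy SSE write: low 128 bits only, bits 255:128 preserved. */
    static void write_xmm_legacy(ymm_t *pReg, uint64_t lo, uint64_t hi)
    {
        pReg->au64[0] = lo;
        pReg->au64[1] = hi;
    }

    /* VEX.128 write: same store, then clear 255:128 (the IEM_MC_CLEAR_YREG_128_UP step). */
    static void write_xmm_vex128(ymm_t *pReg, uint64_t lo, uint64_t hi)
    {
        write_xmm_legacy(pReg, lo, hi);
        pReg->au64[2] = 0;
        pReg->au64[3] = 0;
    }

    int main(void)
    {
        ymm_t Reg = {{ 1, 2, 3, 4 }};
        write_xmm_legacy(&Reg, 0x11, 0x22);
        printf("legacy: upper lanes %" PRIx64 " %" PRIx64 "\n", Reg.au64[2], Reg.au64[3]);
        write_xmm_vex128(&Reg, 0x11, 0x22);
        printf("vex128: upper lanes %" PRIx64 " %" PRIx64 "\n", Reg.au64[2], Reg.au64[3]);
        return 0;
    }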
trunk/src/VBox/VMM/include/IEMInternal.h
r66932 → r66950

@@ -1636 +1636 @@
 IEM_DECL_IMPL_DEF(void, iemAImpl_movshdup,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
 IEM_DECL_IMPL_DEF(void, iemAImpl_movddup,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, uint64_t uSrc));
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
+
 /** @} */
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r66935 → r66950

@@ -382 +382 @@
     a_Type const a_Name = (a_Value); \
     NOREF(a_Name)
+#define IEM_MC_ARG_XSTATE(a_Name, a_iArg) \
+    IEM_MC_ARG_CONST(PX86XSAVEAREA, a_Name, NULL, a_iArg)
+
 #define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) \
     RT_CONCAT(iArgCheck_, a_iArg) = 1; NOREF(RT_CONCAT(iArgCheck_,a_iArg)); \

@@ -531 +534 @@
 #define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Value) do { CHK_TYPE(RTUINT128U, a_u128Value); (void)fAvxWrite; } while (0)
 #define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Value) do { CHK_TYPE(RTUINT256U, a_u256Value); (void)fAvxWrite; } while (0)
+#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg)       do { (a_pu128Dst) = (PRTUINT128U)((uintptr_t)0); CHK_PTYPE(PRTUINT128U, a_pu128Dst); (void)fAvxWrite; } while (0)
+#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) do { (a_pu128Dst) = (PCRTUINT128U)((uintptr_t)0); CHK_PTYPE(PCRTUINT128U, a_pu128Dst); (void)fAvxWrite; } while (0)
+#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg)   do { (a_pu64Dst) = (uint64_t const *)((uintptr_t)0); CHK_PTYPE(uint64_t const *, a_pu64Dst); (void)fAvxWrite; } while (0)
+#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg)               do { (void)fAvxWrite; } while (0)
 #define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) do { (void)fAvxWrite; } while (0)
 #define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) do { (void)fAvxWrite; } while (0)

@@ -577 +584 @@
 #define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT128U, a_u128Dst);} while (0)
 #define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT256U, a_u256Dst);} while (0)
-#define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT256U, a_u256Dst);} while (0)
+#define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT256U, a_u256Dst);} while (0)
 
 #define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(uint8_t, a_u8Value); CHK_SEG_IDX(a_iSeg); } while (0)

@@ -680 +687 @@
 #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() const int fSseRead = 1
 #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() const int fSseRead = 1, fSseWrite = 1
-#define IEM_MC_PREPARE_AVX_USAGE() const int fAvxRead = 1, fAvxWrite = 1, fAvxHost = 1
-#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() const int fAvxRead = 1
-#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() const int fAvxRead = 1, fAvxWrite = 1
+#define IEM_MC_PREPARE_AVX_USAGE() const int fAvxRead = 1, fAvxWrite = 1, fAvxHost = 1, fSseRead = 1, fSseWrite = 1, fSseHost = 1
+#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() const int fAvxRead = 1, fSseRead = 1
+#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() const int fAvxRead = 1, fAvxWrite = 1, fSseRead = 1, fSseWrite = 1
 
 #define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \

@@ -692 +699 @@
 #define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
     do { (void)fSseHost; (void)fSseWrite; CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2);} while (0)
+#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
+#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
+    do { (void)fAvxHost; (void)fAvxWrite; CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); } while (0)
+#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
+    do { (void)fAvxHost; (void)fAvxWrite; CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); CHK_CALL_ARG(a3, 3);} while (0)
+#define IEM_MC_CALL_AVX_AIMPL_4(a_pfnAImpl, a1, a2, a3, a4) \
+    do { (void)fAvxHost; (void)fAvxWrite; CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); CHK_CALL_ARG(a3, 3); CHK_CALL_ARG(a4, 4);} while (0)
 
 #define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (g_fRandom) {
trunk/src/VBox/ValidationKit/bootsectors/bs3-cpu-generated-1-template.c
r66937 → r66950

@@ -101 +101 @@
     BS3CG1OPLOC_END
 } BS3CG1OPLOC;
+AssertCompile(BS3CG1OPLOC_END <= 16);

@@ -165 +166 @@
     /** Operand size in bytes (0 if not applicable). */
     uint8_t cbOperand;
+    /** Current VEX.L value (UINT8_MAX if not applicable). */
+    uint8_t uVexL;
     /** Current target ring (0..3). */
     uint8_t uCpl;

@@ -208 +211 @@
     /** BS3CG1OPLOC_XXX. */
     uint8_t enmLocation;
+    /** BS3CG1OPLOC_XXX for memory encodings (MODRM.rm field). */
+    uint8_t enmLocationMem : 4;
+    /** BS3CG1OPLOC_XXX for register encodings (MODRM.rm field). */
+    uint8_t enmLocationReg : 4;
     /** The BS3CG1DST value for this field.
      * Set to BS3CG1DST_INVALID if memory or immediate. */
     uint8_t idxField;
     /** The base BS3CG1DST value for this field.
-     * Used only by some generalized encoders when dealing with
-     * registers. */
+     * Used only by some generalized encoders when dealing with registers. */
     uint8_t idxFieldBase;
     /** Depends on enmLocation.

@@ -1167 +1173 @@
 
 /**
- * Checks if >= 16 byte SSE/AVX alignment are exempted for the exception type.
+ * Checks if >= 16 byte SSE alignment are exempted for the exception type.
  *
  * @returns true / false.

@@ -1176 +1182 @@
     switch (enmXcptType)
     {
+        case BS3CG1XCPTTYPE_1:
+        case BS3CG1XCPTTYPE_2:
+        case BS3CG1XCPTTYPE_4:
+            return false;
+        case BS3CG1XCPTTYPE_3:
         case BS3CG1XCPTTYPE_4UA:
         case BS3CG1XCPTTYPE_5:
             return true;
+        default:
+            return false;
+    }
+}
+
+
+/**
+ * Checks if >= 16 byte AVX alignment are exempted for the exception type.
+ *
+ * @returns true / false.
+ * @param enmXcptType The type to check.
+ */
+static bool BS3_NEAR_CODE Bs3Cg1XcptTypeIsVexUnaligned(BS3CG1XCPTTYPE enmXcptType)
+{
+    switch (enmXcptType)
+    {
+        case BS3CG1XCPTTYPE_1:
+            return false;
+
+        case BS3CG1XCPTTYPE_2:
+        case BS3CG1XCPTTYPE_3:
+        case BS3CG1XCPTTYPE_4:
+        case BS3CG1XCPTTYPE_4UA:
+        case BS3CG1XCPTTYPE_5:
+        case BS3CG1XCPTTYPE_6:
+        case BS3CG1XCPTTYPE_11:
+        case BS3CG1XCPTTYPE_12:
+            return true;
+
         default:
             return false;

@@ -1319 +1359 @@
 
 
+#if 0 /* unused */
 /** Also encodes idxField of the register operand using idxFieldBase. */
 static unsigned BS3_NEAR_CODE

@@ -1327 +1368 @@
     return Bs3Cfg1EncodeMemMod0Disp(pThis, fAddrOverride, off, iReg & 7, cbOp, cbMissalign, enmLocation);
 }
+#endif
 
 
 /** Also encodes idxField of the register operand using idxFieldBase. */

@@ -2185 +2226 @@
     pThis->abCurInstr[offDst]     = 0xc5; /* vex2 */
     pThis->abCurInstr[offDst + 1] = b;
+    pThis->uVexL = uVexL;
     return offDst + 2;
 }

@@ -2228 +2270 @@
     pThis->abCurInstr[offDst + 1] = b1;
     pThis->abCurInstr[offDst + 2] = b2;
+    pThis->uVexL = uVexL;
     return offDst + 3;
 }

@@ -2286 +2329 @@
     off = Bs3Cg1InsertOpcodes(pThis, off);
     off = Bs3Cfg1EncodeMemMod0Disp(pThis, false, off, 3 /*iReg*/, 16, 1 /*cbMissalign*/, BS3CG1OPLOC_MEM);
-    if (!Bs3Cg1XcptTypeIsUnaligned(pThis->enmXcptType))
+    if (!Bs3Cg1XcptTypeIsVexUnaligned(pThis->enmXcptType))
         pThis->bAlignmentXcpt = X86_XCPT_GP;
     pThis->aOperands[pThis->iRegOp].idxField = BS3CG1DST_XMM3;

@@ -2295 +2338 @@
     off = Bs3Cg1InsertOpcodes(pThis, off);
     off = Bs3Cfg1EncodeMemMod0Disp(pThis, false, off, 3 /*iReg*/, 16, 1 /*cbMissalign*/, BS3CG1OPLOC_MEM);
-    if (!Bs3Cg1XcptTypeIsUnaligned(pThis->enmXcptType))
+    if (!Bs3Cg1XcptTypeIsVexUnaligned(pThis->enmXcptType))
         pThis->bAlignmentXcpt = X86_XCPT_GP;
     pThis->aOperands[pThis->iRegOp].idxField = BS3CG1DST_XMM3;

@@ -2371 +2414 @@
     off = Bs3Cg1InsertOpcodes(pThis, off);
     off = Bs3Cfg1EncodeMemMod0Disp(pThis, false, off, 3 /*iReg*/, 32, 1 /*cbMissalign*/, BS3CG1OPLOC_MEM);
-    if (!Bs3Cg1XcptTypeIsUnaligned(pThis->enmXcptType))
+    if (!Bs3Cg1XcptTypeIsVexUnaligned(pThis->enmXcptType))
         pThis->bAlignmentXcpt = X86_XCPT_GP;
     pThis->aOperands[pThis->iRegOp].idxField = BS3CG1DST_YMM3;

@@ -2380 +2423 @@
     off = Bs3Cg1InsertOpcodes(pThis, off);
     off = Bs3Cfg1EncodeMemMod0Disp(pThis, false, off, 3 /*iReg*/, 32, 1 /*cbMissalign*/, BS3CG1OPLOC_MEM);
-    if (!Bs3Cg1XcptTypeIsUnaligned(pThis->enmXcptType))
+    if (!Bs3Cg1XcptTypeIsVexUnaligned(pThis->enmXcptType))
         pThis->bAlignmentXcpt = X86_XCPT_GP;
     pThis->aOperands[pThis->iRegOp].idxField = BS3CG1DST_YMM3;

@@ -2967 +3010 @@
  * Wip = VEX.W ignored.
  */
-static unsigned BS3_NEAR_CODE Bs3Cg1EncodeNext_VEX_MODRM_WsomethingWO_Vsomething_Wip(PBS3CG1STATE pThis, unsigned iEncoding)
+static unsigned BS3_NEAR_CODE
+Bs3Cg1EncodeNext_VEX_MODRM_WsomethingWO_Vsomething_Wip_OR_ViceVersa(PBS3CG1STATE pThis, unsigned iEncoding)
 {
     unsigned off;

@@ -2977 +3021 @@
             off = Bs3Cg1InsertOpcodes(pThis, off);
             pThis->abCurInstr[off++] = X86_MODRM_MAKE(3, 1, 0);
+            pThis->aOperands[pThis->iRmOp ].enmLocation = pThis->aOperands[pThis->iRegOp].enmLocationReg;
             pThis->aOperands[pThis->iRmOp ].idxField = pThis->aOperands[pThis->iRmOp ].idxFieldBase + 0;
             pThis->aOperands[pThis->iRegOp].idxField = pThis->aOperands[pThis->iRegOp].idxFieldBase + 1;

@@ -2996 +3041 @@
             break;
         case 3:
+            pThis->aOperands[pThis->iRmOp].enmLocation = pThis->aOperands[pThis->iRmOp].enmLocationMem;
             off = Bs3Cg1InsertVex2bPrefix(pThis, 0 /*offDst*/, 0xf /*~V*/, 0 /*L*/, 1 /*~R*/);
             off = Bs3Cg1InsertOpcodes(pThis, off);
-            off = Bs3Cfg1EncodeMemMod0DispWithRegField(pThis, false, off, 2 /*iReg*/, 16, 0, BS3CG1OPLOC_MEM_WO);
+            off = Bs3Cfg1EncodeMemMod0DispWithRegFieldAndDefaults(pThis, false, off, 2 /*iReg*/, 0);
             break;
         case 4:
             off = Bs3Cg1InsertVex3bPrefix(pThis, 0 /*offDst*/, 0xf /*~V*/, 0 /*L*/, 1 /*~R*/, 1 /*~X*/, 1 /*~B*/, 0 /*W*/);
             off = Bs3Cg1InsertOpcodes(pThis, off);
-            off = Bs3Cfg1EncodeMemMod0DispWithRegField(pThis, false, off, 3 /*iReg*/, 16, 0, BS3CG1OPLOC_MEM_WO);
+            off = Bs3Cfg1EncodeMemMod0DispWithRegFieldAndDefaults(pThis, false, off, 3 /*iReg*/, 0);
             break;
         case 5:
             off = Bs3Cg1InsertVex3bPrefix(pThis, 0 /*offDst*/, 0xf /*~V*/, 0 /*L*/, 1 /*~R*/, 1 /*~X*/, 1 /*~B*/, 1 /*W - ignored */);
             off = Bs3Cg1InsertOpcodes(pThis, off);
-            off = Bs3Cfg1EncodeMemMod0DispWithRegField(pThis, false, off, 3 /*iReg*/, 16, 0, BS3CG1OPLOC_MEM_WO);
+            off = Bs3Cfg1EncodeMemMod0DispWithRegFieldAndDefaults(pThis, false, off, 3 /*iReg*/, 0);
             break;
         case 6:
             off = Bs3Cg1InsertVex2bPrefix(pThis, 0 /*offDst*/, 0xf /*~V*/, 0 /*L*/, 1 /*~R*/);
             off = Bs3Cg1InsertOpcodes(pThis, off);
-            off = Bs3Cfg1EncodeMemMod0DispWithRegField(pThis, false, off, 3 /*iReg*/, 16, 1 /*cbMissalign*/, BS3CG1OPLOC_MEM_WO);
-            if (!Bs3Cg1XcptTypeIsUnaligned(pThis->enmXcptType))
+            off = Bs3Cfg1EncodeMemMod0DispWithRegFieldAndDefaults(pThis, false, off, 3 /*iReg*/, 1 /*cbMissalign*/);
+            if (!Bs3Cg1XcptTypeIsVexUnaligned(pThis->enmXcptType))
                 pThis->bAlignmentXcpt = X86_XCPT_GP;
             break;
         case 7:
             off = Bs3Cg1InsertVex3bPrefix(pThis, 0 /*offDst*/, 0xf /*~V*/, 0 /*L*/, 1 /*~R*/, 1 /*~X*/, 1 /*~B*/, 0 /*W*/);
             off = Bs3Cg1InsertOpcodes(pThis, off);
-            off = Bs3Cfg1EncodeMemMod0DispWithRegField(pThis, false, off, 3 /*iReg*/, 16, 1 /*cbMissalign*/, BS3CG1OPLOC_MEM_WO);
-            if (!Bs3Cg1XcptTypeIsUnaligned(pThis->enmXcptType))
+            off = Bs3Cfg1EncodeMemMod0DispWithRegFieldAndDefaults(pThis, false, off, 3 /*iReg*/, 1 /*cbMissalign*/);
+            if (!Bs3Cg1XcptTypeIsVexUnaligned(pThis->enmXcptType))
                 pThis->bAlignmentXcpt = X86_XCPT_GP;
             break;

@@ -3029 +3075 @@
             off = Bs3Cg1InsertOpcodes(pThis, off);
             pThis->abCurInstr[off++] = X86_MODRM_MAKE(3, 1, 0);
+            pThis->aOperands[pThis->iRmOp ].enmLocation = pThis->aOperands[pThis->iRmOp].enmLocationReg;
             pThis->aOperands[pThis->iRmOp ].idxField = pThis->aOperands[pThis->iRmOp ].idxFieldBase + 0;
             pThis->aOperands[pThis->iRegOp].idxField = pThis->aOperands[pThis->iRegOp].idxFieldBase + 1;
-            pThis->aOperands[pThis->iRmOp ].enmLocation = BS3CG1OPLOC_CTX_ZX_VLMAX;
             pThis->fInvalidEncoding = true;
             break;

@@ -3052 +3098 @@
             pThis->aOperands[pThis->iRmOp ].cbOp = 32;
             pThis->aOperands[pThis->iRmOp ].idxFieldBase = BS3CG1DST_YMM0;
-            pThis->aOperands[pThis->iRmOp ].enmLocation = BS3CG1OPLOC_CTX_ZX_VLMAX;
+            pThis->aOperands[pThis->iRmOp ].enmLocation = pThis->aOperands[pThis->iRmOp].enmLocationReg;
             pThis->aOperands[pThis->iRegOp].cbOp = 32;
             pThis->aOperands[pThis->iRegOp].idxFieldBase = BS3CG1DST_YMM0;

@@ -3079 +3125 @@
             off = Bs3Cg1InsertVex2bPrefix(pThis, 0 /*offDst*/, 0xf /*~V*/, 1 /*L*/, 1 /*~R*/);
             off = Bs3Cg1InsertOpcodes(pThis, off);
-            off = Bs3Cfg1EncodeMemMod0DispWithRegField(pThis, false, off, 2 /*iReg*/, 32, 0, BS3CG1OPLOC_MEM_WO);
+            pThis->aOperands[pThis->iRmOp ].enmLocation = pThis->aOperands[pThis->iRmOp].enmLocationMem;
+            off = Bs3Cfg1EncodeMemMod0DispWithRegFieldAndDefaults(pThis, false, off, 2 /*iReg*/, 0);
             break;
         case 24:
             off = Bs3Cg1InsertVex3bPrefix(pThis, 0 /*offDst*/, 0xf /*~V*/, 1 /*L*/, 1 /*~R*/, 1 /*~X*/, 1 /*~B*/, 0 /*W*/);
             off = Bs3Cg1InsertOpcodes(pThis, off);
-            off = Bs3Cfg1EncodeMemMod0DispWithRegField(pThis, false, off, 3 /*iReg*/, 32, 0, BS3CG1OPLOC_MEM_WO);
+            off = Bs3Cfg1EncodeMemMod0DispWithRegFieldAndDefaults(pThis, false, off, 3 /*iReg*/, 0);
             break;
         case 25:
             off = Bs3Cg1InsertVex3bPrefix(pThis, 0 /*offDst*/, 0xf /*~V*/, 1 /*L*/, 1 /*~R*/, 1 /*~X*/, 1 /*~B*/, 1 /*W - ignored */);
             off = Bs3Cg1InsertOpcodes(pThis, off);
-            off = Bs3Cfg1EncodeMemMod0DispWithRegField(pThis, false, off, 3 /*iReg*/, 32, 0, BS3CG1OPLOC_MEM_WO);
+            off = Bs3Cfg1EncodeMemMod0DispWithRegFieldAndDefaults(pThis, false, off, 3 /*iReg*/, 0);
             break;
         case 26:
             off = Bs3Cg1InsertVex2bPrefix(pThis, 0 /*offDst*/, 0xf /*~V*/, 1 /*L*/, 1 /*~R*/);
             off = Bs3Cg1InsertOpcodes(pThis, off);
-            off = Bs3Cfg1EncodeMemMod0DispWithRegField(pThis, false, off, 3 /*iReg*/, 32, 1 /*cbMissalign*/, BS3CG1OPLOC_MEM_WO);
-            if (!Bs3Cg1XcptTypeIsUnaligned(pThis->enmXcptType))
+            off = Bs3Cfg1EncodeMemMod0DispWithRegFieldAndDefaults(pThis, false, off, 3 /*iReg*/, 1 /*cbMissalign*/);
+            if (!Bs3Cg1XcptTypeIsVexUnaligned(pThis->enmXcptType))
                 pThis->bAlignmentXcpt = X86_XCPT_GP;
             break;
         case 27:
             off = Bs3Cg1InsertVex3bPrefix(pThis, 0 /*offDst*/, 0xf /*~V*/, 1 /*L*/, 1 /*~R*/, 1 /*~X*/, 1 /*~B*/, 0 /*W*/);
             off = Bs3Cg1InsertOpcodes(pThis, off);
-            off = Bs3Cfg1EncodeMemMod0DispWithRegField(pThis, false, off, 3 /*iReg*/, 32, 1 /*cbMissalign*/, BS3CG1OPLOC_MEM_WO);
-            if (!Bs3Cg1XcptTypeIsUnaligned(pThis->enmXcptType))
+            off = Bs3Cfg1EncodeMemMod0DispWithRegFieldAndDefaults(pThis, false, off, 3 /*iReg*/, 1 /*cbMissalign*/);
+            if (!Bs3Cg1XcptTypeIsVexUnaligned(pThis->enmXcptType))
                 pThis->bAlignmentXcpt = X86_XCPT_GP;
             break;

@@ -3110 +3157 @@
             off = Bs3Cg1InsertOpcodes(pThis, off);
             pThis->abCurInstr[off++] = X86_MODRM_MAKE(3, 1, 0);
+            pThis->aOperands[pThis->iRmOp ].enmLocation = pThis->aOperands[pThis->iRmOp].enmLocationReg;
             pThis->aOperands[pThis->iRmOp ].idxField = pThis->aOperands[pThis->iRmOp ].idxFieldBase + 0;
             pThis->aOperands[pThis->iRegOp].idxField = pThis->aOperands[pThis->iRegOp].idxFieldBase + 1;

@@ -3287 +3335 @@
 {
     pThis->bAlignmentXcpt = UINT8_MAX;
+    pThis->uVexL          = UINT8_MAX;
     if (pThis->pfnEncoder)
         return pThis->pfnEncoder(pThis, iEncoding);

@@ -3696 +3745 @@
             break;
 
+        case BS3CG1ENC_VEX_MODRM_Vx_WO_Wx:
+            pThis->pfnEncoder = Bs3Cg1EncodeNext_VEX_MODRM_WsomethingWO_Vsomething_Wip_OR_ViceVersa;
+            pThis->iRmOp  = 1;
+            pThis->iRegOp = 0;
+            pThis->aOperands[0].cbOp = 16;
+            pThis->aOperands[1].cbOp = 16;
+            pThis->aOperands[0].enmLocation    = BS3CG1OPLOC_CTX_ZX_VLMAX;
+            pThis->aOperands[1].enmLocation    = BS3CG1OPLOC_CTX;
+            pThis->aOperands[1].enmLocationReg = BS3CG1OPLOC_CTX;
+            pThis->aOperands[1].enmLocationMem = BS3CG1OPLOC_MEM;
+            pThis->aOperands[0].idxFieldBase   = BS3CG1DST_XMM0;
+            pThis->aOperands[1].idxFieldBase   = BS3CG1DST_XMM0;
+            break;
+
         case BS3CG1ENC_VEX_MODRM_Md_WO_Vss:
             pThis->pfnEncoder = Bs3Cg1EncodeNext_VEX_MODRM_VsomethingWO_Msomething_Wip_Lig_OR_ViceVersa;

@@ -3752 +3815 @@
         case BS3CG1ENC_VEX_MODRM_Wps_WO_Vps:
         case BS3CG1ENC_VEX_MODRM_Wpd_WO_Vpd:
-            pThis->pfnEncoder = Bs3Cg1EncodeNext_VEX_MODRM_WsomethingWO_Vsomething_Wip;
+            pThis->pfnEncoder = Bs3Cg1EncodeNext_VEX_MODRM_WsomethingWO_Vsomething_Wip_OR_ViceVersa;
             pThis->iRmOp  = 0;
             pThis->iRegOp = 1;
             pThis->aOperands[0].cbOp = 16;
             pThis->aOperands[1].cbOp = 16;
-            pThis->aOperands[0].enmLocation = BS3CG1OPLOC_CTX_ZX_VLMAX;
-            pThis->aOperands[1].enmLocation = BS3CG1OPLOC_CTX;
-            pThis->aOperands[0].idxFieldBase = BS3CG1DST_XMM0;
-            pThis->aOperands[1].idxFieldBase = BS3CG1DST_XMM0;
+            pThis->aOperands[0].enmLocation    = BS3CG1OPLOC_CTX_ZX_VLMAX;
+            pThis->aOperands[0].enmLocationReg = BS3CG1OPLOC_CTX_ZX_VLMAX;
+            pThis->aOperands[0].enmLocationMem = BS3CG1OPLOC_MEM;
+            pThis->aOperands[1].enmLocation    = BS3CG1OPLOC_CTX;
+            pThis->aOperands[0].idxFieldBase   = BS3CG1DST_XMM0;
+            pThis->aOperands[1].idxFieldBase   = BS3CG1DST_XMM0;
             break;

@@ -4047 +4112 @@
         CASE_PRED(BS3CG1PRED_SIZE_O32, pThis->cbOperand == 4);
        CASE_PRED(BS3CG1PRED_SIZE_O64, pThis->cbOperand == 8);
+        CASE_PRED(BS3CG1PRED_VEXL_0, pThis->uVexL == 0);
+        CASE_PRED(BS3CG1PRED_VEXL_1, pThis->uVexL == 1);
         CASE_PRED(BS3CG1PRED_RING_0, pThis->uCpl == 0);
         CASE_PRED(BS3CG1PRED_RING_1, pThis->uCpl == 1);

@@ -5357 +5424 @@
             }
         }
+#if 1
+        else ASMHalt();
+#endif
     }
     else
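The split of Bs3Cg1XcptTypeIsUnaligned into SSE and VEX variants reflects the different alignment rules: legacy SSE forms of exception types other than 3, 4UA and 5 take #GP on a misaligned 16-byte operand, while the VEX forms tolerate misalignment for every listed type except 1. A hedged sketch of how a generator could fold that into an expected-exception value (all names here are illustrative; the real code sets pThis->bAlignmentXcpt as shown above):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Subset of the exception types used here (loosely BS3CG1XCPTTYPE_*). */
    typedef enum { XT_1, XT_2, XT_3, XT_4, XT_4UA, XT_5, XT_6, XT_11, XT_12 } XTYPE;

    #define XCPT_NONE UINT8_C(0xff)  /* no exception expected, like bAlignmentXcpt = UINT8_MAX */
    #define XCPT_GP   UINT8_C(13)    /* #GP vector */

    /* Legacy SSE: only types 3, 4UA and 5 tolerate a misaligned 16-byte operand. */
    static bool SseTolerates(XTYPE enmType)
    {
        return enmType == XT_3 || enmType == XT_4UA || enmType == XT_5;
    }

    /* VEX: types 2-6, 11 and 12 tolerate it; type 1 does not. */
    static bool VexTolerates(XTYPE enmType)
    {
        return enmType != XT_1;
    }

    /* Expected alignment exception for a memory operand misaligned by cbMisalign bytes. */
    static uint8_t ExpectedAlignmentXcpt(bool fVex, XTYPE enmType, unsigned cbMisalign)
    {
        if (!cbMisalign)
            return XCPT_NONE;
        return (fVex ? VexTolerates(enmType) : SseTolerates(enmType)) ? XCPT_NONE : XCPT_GP;
    }

    int main(void)
    {
        printf("VEX type 4, misaligned: %#x\n", ExpectedAlignmentXcpt(true,  XT_4, 1)); /* 0xff */
        printf("SSE type 4, misaligned: %#x\n", ExpectedAlignmentXcpt(false, XT_4, 1)); /* 0xd  */
        return 0;
    }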
trunk/src/VBox/ValidationKit/bootsectors/bs3-cpu-generated-1.h
r66937 → r66950

@@ -57 +57 @@
     BS3CG1OP_Wq_WO,
     BS3CG1OP_WqZxReg_WO,
+    BS3CG1OP_Wx,
 
     BS3CG1OP_Gb,

@@ -89 +90 @@
     BS3CG1OP_VqHi_WO,
     BS3CG1OP_VqZx_WO,
+    BS3CG1OP_Vx_WO,
 
     BS3CG1OP_Ib,

@@ -157 +159 @@
     BS3CG1ENC_VEX_MODRM_VssZx_WO_Md,
     BS3CG1ENC_VEX_MODRM_VsdZx_WO_Mq,
+    BS3CG1ENC_VEX_MODRM_Vx_WO_Wx,
     BS3CG1ENC_VEX_MODRM_Md_WO,
     BS3CG1ENC_VEX_MODRM_Md_WO_Vss,

@@ -676 +679 @@
     BS3CG1PRED_SIZE_O32,
     BS3CG1PRED_SIZE_O64,
+    /* VEX.L values. */
+    BS3CG1PRED_VEXL_0,
+    BS3CG1PRED_VEXL_1,
     /* Execution ring. */
     BS3CG1PRED_RING_0,