VirtualBox

Changeset 101248 in vbox


Ignore:
Timestamp:
Sep 24, 2023 2:48:56 AM (12 months ago)
Author:
vboxsync
Message:

VMM/IEM: Ran first native arm TB. Executable memory fun prevents easily running any further blocks. bugref:10370

Location:
trunk
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/iprt/armv8.h

    r101246 r101248  
    22022202/** @} */
    22032203
     2204
     2205#if (!defined(VBOX_FOR_DTRACE_LIB) && defined(__cplusplus) && !defined(ARMV8_WITHOUT_MK_INSTR)) || defined(DOXYGEN_RUNNING)
     2206/** @defgroup grp_rt_armv8_mkinstr   Instruction Encoding Helpers
     2207 *
     2208 * A few inlined functions and macros for assisting in encoding common ARMv8
     2209 * instructions.
     2210 *
     2211 * @{ */
     2212
     2213/** A64: Return instruction. */
     2214#define ARMV8_A64_INSTR_RET         UINT32_C(0xd65f03c0)
     2215
     2216
/** Addressing/writeback variants for the STP/LDP pair instructions, used by
 * Armv8A64MkInstrStLdPair (encoded into bits 24:23).
 * NOTE(review): value 0 (the no-writeback/no-allocate variant) is deliberately
 * not represented here — confirm against the Arm ARM before adding it. */
typedef enum
{
    /** Add @a iImm7*sizeof(reg) to @a iBaseReg after the store/load,
     * and update the register. */
    kArm64InstrStLdPairType_kPostIndex = 1,
    /** Add @a iImm7*sizeof(reg) to @a iBaseReg before the store/load,
     * but don't update the register. */
    kArm64InstrStLdPairType_kSigned    = 2,
    /** Add @a iImm7*sizeof(reg) to @a iBaseReg before the store/load,
     * and update the register. */
    kArm64InstrStLdPairType_kPreIndex  = 3
} ARM64INSTRSTLDPAIRTYPE;
     2229
/**
 * A64: Encodes either stp (store register pair) or ldp (load register pair).
 *
 * @returns The encoded instruction.
 * @param   fLoad       true for ldp, false for stp.
 * @param   iOpc        When @a fSimdFp is @c false:
 *                          - 0 for 32-bit GPRs (Wt).
 *                          - 1 for encoding stgp or ldpsw.
 *                          - 2 for 64-bit GPRs (Xt).
 *                          - 3 illegal.
 *                      When @a fSimdFp is @c true:
 *                          - 0 for 32-bit SIMD&FP registers (St).
 *                          - 1 for 64-bit SIMD&FP registers (Dt).
 *                          - 2 for 128-bit SIMD&FP registers (Qt).
 * @param   enmType     The instruction variant wrt addressing and updating of the
 *                      addressing register.
 * @param   iReg1       The first register to store/load.
 * @param   iReg2       The second register to store/load.
 * @param   iBaseReg    The base register to use when addressing. SP is allowed.
 * @param   iImm7       Signed addressing immediate value scaled, range -64..63,
 *                      will be multiplied by the register size.
 * @param   fSimdFp     true for SIMD&FP registers, false for GPRs and
 *                      stgp/ldpsw instructions.
 */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrStLdPair(bool fLoad, uint32_t iOpc, ARM64INSTRSTLDPAIRTYPE enmType,
                                                    uint32_t iReg1, uint32_t iReg2, uint32_t iBaseReg, int32_t iImm7 = 0,
                                                    bool fSimdFp = false)
{
    Assert(iOpc < 3); Assert(iReg1 <= 31); Assert(iReg2 <= 31); Assert(iBaseReg <= 31); Assert(iImm7 < 64 && iImm7 >= -64);
    return (iOpc                               << 30)
         | UINT32_C(0x28000000)                       /* fixed opcode bits for the load/store pair class */
         | ((uint32_t)fSimdFp                  << 26) /* VR bit, see "Top-level encodings for A64" */
         | ((uint32_t)enmType                  << 23)
         | ((uint32_t)fLoad                    << 22)
         | (((uint32_t)iImm7 & UINT32_C(0x7f)) << 15) /* mask keeps the sign bits of negative offsets out of other fields */
         | (iReg2                              << 10)
         | (iBaseReg                           <<  5)
         | iReg1;
}
     2269
     2270
/** Shift operation kinds for the shifted-register instruction encoders
 * (placed in the instruction's shift field, bits 23:22, by
 * Armv8A64MkInstrLogicalShiftedReg). */
typedef enum
{
    kArmv8A64InstrShift_kLsl = 0,   /**< Logical shift left. */
    kArmv8A64InstrShift_kLsr,       /**< Logical shift right. */
    kArmv8A64InstrShift_kAsr,       /**< Arithmetic shift right. */
    kArmv8A64InstrShift_kRor        /**< Rotate right. */
} ARMV8A64INSTRSHIFT;
     2278
     2279
/**
 * A64: Encodes a logical instruction with a shifted 2nd register operand.
 *
 * @returns The encoded instruction.
 * @param   u2Opc           The logical operation to perform:
 *                              - 0: AND  (BIC  when @a fNot is true),
 *                              - 1: ORR  (ORN  when @a fNot is true),
 *                              - 2: EOR  (EON  when @a fNot is true),
 *                              - 3: ANDS (BICS when @a fNot is true).
 * @param   fNot            Whether to complement the 2nd operand.
 * @param   iRegResult      The output register.
 * @param   iReg1           The 1st register operand.
 * @param   iReg2Shifted    The 2nd register operand, to which the optional
 *                          shifting is applied.
 * @param   f64Bit          true for 64-bit GPRs (default), @c false for 32-bit
 *                          GPRs.
 * @param   offShift6       The shift amount (default: none); must be below the
 *                          operand width.
 * @param   enmShift        The shift operation (default: LSL).
 */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrLogicalShiftedReg(uint32_t u2Opc, bool fNot,
                                                             uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted,
                                                             bool f64Bit, uint32_t offShift6, ARMV8A64INSTRSHIFT enmShift)
{
    Assert(u2Opc < 4); Assert(offShift6 < (f64Bit ? 64 : 32));
    Assert(iRegResult < 32); Assert(iReg1 < 32); Assert(iReg2Shifted < 32);
    return ((uint32_t)f64Bit << 31)
         | (u2Opc << 29)
         | UINT32_C(0x0a000000)         /* fixed opcode bits for logical (shifted register) */
         | ((uint32_t)enmShift << 22)
         | ((uint32_t)fNot     << 21)   /* the N bit: complement the shifted operand */
         | (iReg2Shifted       << 16)
         | (offShift6          << 10)
         | (iReg1              <<  5)
         | iRegResult;
}
     2311
     2312
     2313/** A64: Encodes an AND instruction.
     2314 * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
     2315DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrAnd(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
     2316                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
     2317{
     2318    return Armv8A64MkInstrLogicalShiftedReg(0, false /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);
     2319}
     2320
     2321
/** A64: Encodes a BIC instruction (AND with complemented 2nd operand).
 * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBic(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
{
    return Armv8A64MkInstrLogicalShiftedReg(0, true /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);
}
     2329
     2330
     2331/** A64: Encodes an ORR instruction.
     2332 * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
     2333DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrOrr(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
     2334                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
     2335{
     2336    return Armv8A64MkInstrLogicalShiftedReg(1, false /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);
     2337}
     2338
     2339
/** A64: Encodes an ORN instruction (ORR with complemented 2nd operand).
 * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrOrn(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
{
    return Armv8A64MkInstrLogicalShiftedReg(1, true /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);
}
     2347
     2348
     2349/** A64: Encodes an EOR instruction.
     2350 * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
     2351DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrEor(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
     2352                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
     2353{
     2354    return Armv8A64MkInstrLogicalShiftedReg(2, false /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);
     2355}
     2356
     2357
/** A64: Encodes an EON instruction (EOR with complemented 2nd operand).
 * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrEon(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
{
    return Armv8A64MkInstrLogicalShiftedReg(2, true /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);
}
     2365
     2366
     2367/** A64: Encodes an ANDS instruction.
     2368 * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
     2369DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrAnds(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
     2370                                                uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
     2371{
     2372    return Armv8A64MkInstrLogicalShiftedReg(3, false /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);
     2373}
     2374
     2375
/** A64: Encodes a BICS instruction (ANDS with complemented 2nd operand).
 * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBics(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
                                                uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
{
    return Armv8A64MkInstrLogicalShiftedReg(3, true /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);
}
     2383
     2384
/**
 * A64: Encodes either add, adds, sub or subs.
 *
 * @returns The encoded instruction.
 * @param   fSub                    true for sub and subs, false for add and
 *                                  adds.
 * @param   iRegResult              The register to store the result in.
 *                                  SP is valid when @a fSetFlags = false,
 *                                  and ZR is valid otherwise.
 * @param   iRegSrc                 The register containing the augend (@a fSub
 *                                  = false) or minuend (@a fSub = true).  SP is
 *                                  a valid register for all variations.
 * @param   uImm12AddendSubtrahend  The addend (@a fSub = false) or subtrahend
 *                                  (@a fSub = true); unsigned, below 4096.
 * @param   f64Bit                  true for 64-bit GPRs (default), false for
 *                                  32-bit GPRs.
 * @param   fSetFlags               Whether to set flags (adds / subs) or not
 *                                  (add / sub - default).
 * @param   fShift12                Whether to shift uImm12AddendSubtrahend 12
 *                                  bits to the left, or not (default).
 */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrAddSub(bool fSub, uint32_t iRegResult, uint32_t iRegSrc,
                                                  uint32_t uImm12AddendSubtrahend, bool f64Bit = true, bool fSetFlags = false,
                                                  bool fShift12 = false)
{
    Assert(uImm12AddendSubtrahend < 4096); Assert(iRegSrc < 32); Assert(iRegResult < 32);
    return ((uint32_t)f64Bit       << 31)
         | ((uint32_t)fSub         << 30)
         | ((uint32_t)fSetFlags    << 29)
         | UINT32_C(0x11000000)            /* fixed opcode bits for add/sub (immediate) */
         | ((uint32_t)fShift12     << 22)
         | (uImm12AddendSubtrahend << 10)
         | (iRegSrc                <<  5)
         | iRegResult;
}
     2420
     2421
/**
 * A64: Encodes a B (unconditional branch w/ imm) instruction.
 *
 * @returns The encoded instruction.
 * @param   iImm26      Signed number of instructions to jump, i.e. the byte
 *                      offset divided by 4; range -2^26 .. 2^26-1.
 */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrB(int32_t iImm26)
{
    Assert(iImm26 >= -67108864 && iImm26 < 67108864);
    return UINT32_C(0x14000000) | ((uint32_t)iImm26 & UINT32_C(0x3ffffff));
}
     2433
     2434
/**
 * A64: Encodes a BL (unconditional call w/ imm) instruction.
 *
 * Same encoding as B with bit 31 set (which makes it link the return address).
 *
 * @returns The encoded instruction.
 * @param   iImm26      Signed number of instructions to jump, i.e. the byte
 *                      offset divided by 4; range -2^26 .. 2^26-1.
 */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBl(int32_t iImm26)
{
    return Armv8A64MkInstrB(iImm26) | RT_BIT_32(31);
}
     2445
     2446
/**
 * A64: Encodes a BR (unconditional branch w/ register) instruction.
 *
 * @returns The encoded instruction.
 * @param   iReg                    The register containing the target address.
 */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBr(uint32_t iReg)
{
    Assert(iReg < 32);
    return UINT32_C(0xd61f0000) | (iReg <<  5);
}
     2458
     2459
/**
 * A64: Encodes a BLR instruction.
 *
 * Same encoding as BR with bit 21 set (which makes it link the return address).
 *
 * @returns The encoded instruction.
 * @param   iReg                    The register containing the target address.
 */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBlr(uint32_t iReg)
{
    return Armv8A64MkInstrBr(iReg) | RT_BIT_32(21);
}
     2470
     2471
/**
 * A64: Encodes CBZ and CBNZ (conditional branch w/ immediate) instructions.
 *
 * @returns The encoded instruction.
 * @param   fJmpIfNotZero   false to jump if register is zero, true to jump if
 *                          it's not zero.
 * @param   iImm19          Signed number of instructions to jump, i.e. the
 *                          byte offset divided by 4; range -2^18 .. 2^18-1.
 * @param   iReg            The GPR to check for zero / non-zero value.
 * @param   f64Bit          true for 64-bit register, false for 32-bit.
 */
DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrCbzCbnz(bool fJmpIfNotZero, int32_t iImm19, uint32_t iReg, bool f64Bit = true)
{
    Assert(iReg < 32); Assert(iImm19 >= -262144 && iImm19 < 262144);
    return ((uint32_t)f64Bit             << 31)
         | UINT32_C(0x34000000)               /* fixed opcode bits for compare & branch (immediate) */
         | ((uint32_t)fJmpIfNotZero      << 24)
         | (((uint32_t)iImm19 & 0x7ffff) <<  5)
         | iReg;
}
     2491
     2492
     2493/** @} */
     2494
     2495#endif /* !dtrace && __cplusplus */
     2496
    22042497/** @} */
    22052498
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r101247 r101248  
    501501
    502502    /* Allocate a chunk. */
     503#ifdef RT_OS_DARWIN /** @todo oh carp! This isn't going to work very well with the unpredictability of the simple heap... */
     504    void *pvChunk = RTMemPageAllocEx(pExecMemAllocator->cbChunk, 0);
     505#else
    503506    void *pvChunk = RTMemPageAllocEx(pExecMemAllocator->cbChunk, RTMEMPAGEALLOC_F_EXECUTABLE);
     507#endif
    504508    AssertLogRelReturn(pvChunk, VERR_NO_EXEC_MEMORY);
    505509
     
    765769static void iemExecMemAllocatorReadyForUse(PVMCPUCC pVCpu, void *pv, size_t cb)
    766770{
     771#ifdef RT_OS_DARWIN
     772    int rc = RTMemProtect(pv, cb, RTMEM_PROT_EXEC | RTMEM_PROT_READ);
     773    AssertRC(rc); RT_NOREF(pVCpu);
     774#else
    767775    RT_NOREF(pVCpu, pv, cb);
     776#endif
    768777}
    769778
     
    10511060{
    10521061#ifdef RT_ARCH_AMD64
    1053     /* eax = call status code.*/
     1062    /*
     1063     * AMD64: eax = call status code.
     1064     */
    10541065
    10551066    /* edx = rcPassUp */
     
    10811092
    10821093#elif RT_ARCH_ARM64
    1083     RT_NOREF(pReNative, idxInstr);
    1084     off = UINT32_MAX;
     1094    /*
     1095     * ARM64: w0 = call status code.
     1096     */
     1097    off = iemNativeEmitLoadGprImm64(pReNative, off, ARMV8_A64_REG_X2, idxInstr); /** @todo 32-bit imm load? Fixed counter register? */
     1098    off = iemNativeEmitLoadGprFromVCpuU32(pReNative, off, ARMV8_A64_REG_X3, RT_UOFFSETOF(VMCPUCC, iem.s.rcPassUp));
     1099
     1100    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
     1101    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1102
     1103    pu32CodeBuf[off++] = Armv8A64MkInstrOrr(ARMV8_A64_REG_X4, ARMV8_A64_REG_X3, ARMV8_A64_REG_X0, false /*f64Bit*/);
     1104
     1105    uint32_t const idxLabel = iemNativeMakeLabel(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
     1106    AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
     1107    AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5), UINT32_MAX);
     1108    pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(true /*fJmpIfNotZero*/, ARMV8_A64_REG_X4, false /*f64Bit*/);
    10851109
    10861110#else
     
    11771201# endif
    11781202
    1179     /* Check the status code. */
     1203#elif RT_ARCH_ARM64
     1204    /*
     1205     * ARM64:
     1206     */
     1207    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
     1208    if (cParams > 0)
     1209        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, pCallEntry->auParams[0]);
     1210    if (cParams > 1)
     1211        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, pCallEntry->auParams[1]);
     1212    if (cParams > 2)
     1213        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, pCallEntry->auParams[2]);
     1214    off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0,
     1215                                    (uintptr_t)g_apfnIemThreadedFunctions[pCallEntry->enmFunction]);
     1216
     1217    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1218    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1219
     1220    pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
     1221
     1222#else
     1223# error "port me"
     1224#endif
     1225
     1226    /*
     1227     * Check the status code.
     1228     */
    11801229    off = iemNativeEmitCheckCallRetAndPassUp(pReNative, off, pCallEntry->idxInstr);
    11811230    AssertReturn(off != UINT32_MAX, off);
    11821231
     1232    return off;
     1233}
     1234
     1235
     1236/**
     1237 * Emits a standard epilog.
     1238 */
     1239static uint32_t iemNativeEmitRcFiddling(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
     1240{
     1241    /*
     1242     * Generate the rc + rcPassUp fiddling code if needed.
     1243     */
     1244    uint32_t idxLabel = iemNativeFindLabel(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
     1245    if (idxLabel != UINT32_MAX)
     1246    {
     1247        Assert(pReNative->paLabels[idxLabel].off == UINT32_MAX);
     1248        pReNative->paLabels[idxLabel].off = off;
     1249
     1250        /* iemNativeHlpExecStatusCodeFiddling(PVMCPUCC pVCpu, int rc, uint8_t idxInstr) */
     1251#ifdef RT_ARCH_AMD64
     1252        /*
     1253         * AMD64:
     1254         */
     1255        uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
     1256        AssertReturn(pbCodeBuf, UINT32_MAX);
     1257
     1258        /* Call helper and jump to return point. */
     1259# ifdef RT_OS_WINDOWS
     1260        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_x8,  X86_GREG_xCX); /* cl = instruction number */
     1261        AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1262        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xCX, IEMNATIVE_REG_FIXED_PVMCPU);
     1263        AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1264        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xAX);
     1265        AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1266# else
     1267        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDI, IEMNATIVE_REG_FIXED_PVMCPU);
     1268        AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1269        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xSI, X86_GREG_xAX);
     1270        AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1271        off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xCX); /* cl = instruction number */
     1272        AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1273# endif
     1274        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, (uintptr_t)iemNativeHlpExecStatusCodeFiddling);
     1275        AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1276
     1277        pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
     1278        AssertReturn(pbCodeBuf, UINT32_MAX);
     1279        pbCodeBuf[off++] = 0xff;                    /* call rax */
     1280        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
     1281
     1282        /* Jump to common return point. */
     1283        uint32_t offRel = pReNative->paLabels[idxReturnLabel].off - (off + 2);
     1284        if (-(int32_t)offRel <= 127)
     1285        {
     1286            pbCodeBuf[off++] = 0xeb;                /* jmp rel8 */
     1287            pbCodeBuf[off++] = (uint8_t)offRel;
     1288            off++;
     1289        }
     1290        else
     1291        {
     1292            offRel -= 3;
     1293            pbCodeBuf[off++] = 0xe9;                /* jmp rel32 */
     1294            pbCodeBuf[off++] = RT_BYTE1(offRel);
     1295            pbCodeBuf[off++] = RT_BYTE2(offRel);
     1296            pbCodeBuf[off++] = RT_BYTE3(offRel);
     1297            pbCodeBuf[off++] = RT_BYTE4(offRel);
     1298        }
     1299        pbCodeBuf[off++] = 0xcc;                    /*  int3 poison */
    11831300
    11841301#elif RT_ARCH_ARM64
    1185     RT_NOREF(pReNative, pCallEntry, cParams);
    1186     off = UINT32_MAX;
    1187 
     1302        /*
     1303         * ARM64:
     1304         */
     1305        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_CALL_RET_GREG);
     1306        AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1307        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
     1308        AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1309        /* IEMNATIVE_CALL_ARG2_GREG is already set. */
     1310        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, (uintptr_t)iemNativeHlpExecStatusCodeFiddling);
     1311        AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1312
     1313        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
     1314        AssertReturn(pu32CodeBuf, UINT32_MAX);
     1315        pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
     1316
     1317        /* Jump back to the common return point. */
     1318        int32_t const offRel = pReNative->paLabels[idxReturnLabel].off - off;
     1319        pu32CodeBuf[off++] = Armv8A64MkInstrB(offRel);
    11881320#else
    11891321# error "port me"
    11901322#endif
     1323    }
    11911324    return off;
    11921325}
     
    11981331static uint32_t iemNativeEmitEpilog(PIEMRECOMPILERSTATE pReNative, uint32_t off)
    11991332{
     1333    /*
     1334     * Successful return, so clear the return register (eax, w0).
     1335     */
     1336    off = iemNativeEmitGprZero(pReNative,off, IEMNATIVE_CALL_RET_GREG);
     1337    AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1338
     1339    /*
     1340     * Define label for common return point.
     1341     */
     1342    uint32_t const idxReturn = iemNativeMakeLabel(pReNative, kIemNativeLabelType_Return, off);
     1343    AssertReturn(idxReturn != UINT32_MAX, UINT32_MAX);
     1344
     1345    /*
     1346     * Restore registers and return.
     1347     */
    12001348#ifdef RT_ARCH_AMD64
    12011349    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 20);
    12021350    AssertReturn(pbCodeBuf, UINT32_MAX);
    1203 
    1204     /*
    1205      * Successful return, so clear eax.
    1206      */
    1207     pbCodeBuf[off++] = 0x33;                    /* xor eax, eax */
    1208     pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, X86_GREG_xAX, X86_GREG_xAX);
    1209 
    1210     /*
    1211      * Define label for common return point.
    1212      */
    1213     uint32_t const idxReturn = iemNativeMakeLabel(pReNative, kIemNativeLabelType_Return, off);
    1214     AssertReturn(idxReturn != UINT32_MAX, UINT32_MAX);
    12151351
    12161352    /* Reposition esp at the r15 restore point. */
     
    12381374    pbCodeBuf[off++] = 0xcc;                    /* int3 poison */
    12391375
    1240     /*
    1241      * Generate the rc + rcPassUp fiddling code if needed.
    1242      */
    1243     uint32_t idxLabel = iemNativeFindLabel(pReNative, kIemNativeLabelType_NonZeroRetOrPassUp);
    1244     if (idxLabel != UINT32_MAX)
    1245     {
    1246         Assert(pReNative->paLabels[idxLabel].off == UINT32_MAX);
    1247         pReNative->paLabels[idxLabel].off = off;
    1248 
    1249         /* Call helper and jump to return point. */
    1250 # ifdef RT_OS_WINDOWS
    1251         off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_x8,  X86_GREG_xCX); /* cl = instruction number */
    1252         AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1253         off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xCX, IEMNATIVE_REG_FIXED_PVMCPU);
    1254         AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1255         off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xAX);
    1256         AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1257 # else
    1258         off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDI, IEMNATIVE_REG_FIXED_PVMCPU);
    1259         AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1260         off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xSI, X86_GREG_xAX);
    1261         AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1262         off = iemNativeEmitLoadGprFromGpr(pReNative, off, X86_GREG_xDX, X86_GREG_xCX); /* cl = instruction number */
    1263         AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1264 # endif
    1265         off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, (uintptr_t)iemNativeHlpExecStatusCodeFiddling);
    1266         AssertReturn(off != UINT32_MAX, UINT32_MAX);
    1267 
    1268         pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
    1269         AssertReturn(pbCodeBuf, UINT32_MAX);
    1270         pbCodeBuf[off++] = 0xff;                    /* call rax */
    1271         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
    1272 
    1273         /* Jump to common return point. */
    1274         uint32_t offRel = pReNative->paLabels[idxReturn].off - (off + 2);
    1275         if (-(int32_t)offRel <= 127)
    1276         {
    1277             pbCodeBuf[off++] = 0xeb;                /* jmp rel8 */
    1278             pbCodeBuf[off++] = (uint8_t)offRel;
    1279             off++;
    1280         }
    1281         else
    1282         {
    1283             offRel -= 3;
    1284             pbCodeBuf[off++] = 0xe9;                /* jmp rel32 */
    1285             pbCodeBuf[off++] = RT_BYTE1(offRel);
    1286             pbCodeBuf[off++] = RT_BYTE2(offRel);
    1287             pbCodeBuf[off++] = RT_BYTE3(offRel);
    1288             pbCodeBuf[off++] = RT_BYTE4(offRel);
    1289         }
    1290         pbCodeBuf[off++] = 0xcc;                    /*  int3 poison */
    1291     }
    1292 
    12931376#elif RT_ARCH_ARM64
    1294     RT_NOREF(pReNative);
    1295     off = UINT32_MAX;
     1377    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
     1378    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1379
     1380    /* ldp x19, x20, [sp #IEMNATIVE_FRAME_VAR_SIZE]! ; Unallocate the variable space and restore x19+x20. */
     1381    AssertCompile(IEMNATIVE_FRAME_VAR_SIZE < 64*8);
     1382    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kPreIndex,
     1383                                                 ARMV8_A64_REG_X19, ARMV8_A64_REG_X20, ARMV8_A64_REG_SP,
     1384                                                 IEMNATIVE_FRAME_VAR_SIZE / 8);
     1385    /* Restore x21 thru x28 + BP and LR (ret address) (SP remains unchanged in the kSigned variant). */
     1386    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
     1387                                                 ARMV8_A64_REG_X21, ARMV8_A64_REG_X22, ARMV8_A64_REG_SP, 2);
     1388    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
     1389                                                 ARMV8_A64_REG_X23, ARMV8_A64_REG_X24, ARMV8_A64_REG_SP, 4);
     1390    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
     1391                                                 ARMV8_A64_REG_X25, ARMV8_A64_REG_X26, ARMV8_A64_REG_SP, 6);
     1392    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
     1393                                                 ARMV8_A64_REG_X27, ARMV8_A64_REG_X28, ARMV8_A64_REG_SP, 8);
     1394    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
     1395                                                 ARMV8_A64_REG_BP,  ARMV8_A64_REG_LR,  ARMV8_A64_REG_SP, 10);
     1396    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);
     1397
     1398    /* add sp, sp, IEMNATIVE_FRAME_SAVE_REG_SIZE ;  */
     1399    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE < 4096);
     1400    pu32CodeBuf[off++] = Armv8A64MkInstrAddSub(false /*fSub*/, ARMV8_A64_REG_SP, ARMV8_A64_REG_SP, IEMNATIVE_FRAME_SAVE_REG_SIZE);
     1401
     1402    /* ret */
     1403    pu32CodeBuf[off++] = ARMV8_A64_INSTR_RET;
    12961404
    12971405#else
    12981406# error "port me"
    12991407#endif
    1300     return off;
    1301 }
    1302 
    1303 
    1304 typedef enum
    1305 {
    1306     kArm64InstrStLdPairType_kPostIndex = 1,
    1307     kArm64InstrStLdPairType_kSigned    = 2,
    1308     kArm64InstrStLdPairType_kPreIndex  = 3
    1309 } ARM64INSTRSTLDPAIRTYPE;
    1310 
    1311 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrStLdPair(bool fLoad, uint32_t iOpc, ARM64INSTRSTLDPAIRTYPE enmType,
    1312                                                     uint32_t iReg1, uint32_t iReg2, uint32_t iBaseReg, int32_t iImm7 = 0)
    1313 {
    1314     Assert(iOpc < 3); Assert(iReg1 <= 31); Assert(iReg2 <= 31); Assert(iBaseReg <= 31); Assert(iImm7 < 64 && iImm7 >= -64);
    1315     return (iOpc << 30)
    1316          | UINT32_C(0x28000000)
    1317          | ((uint32_t)enmType << 23)
    1318          | ((uint32_t)fLoad << 22)
    1319          | ((uint32_t)iImm7 << 15)
    1320          | (iReg2 << 10)
    1321          | (iBaseReg << 5)
    1322          | iReg1;
    1323 }
    1324 
     1408
     1409    return iemNativeEmitRcFiddling(pReNative, off, idxReturn);
     1410}
    13251411
    13261412
     
    13841470    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
    13851471    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1472
    13861473    /* stp x19, x20, [sp, #-IEMNATIVE_FRAME_SAVE_REG_SIZE] ; Allocate space for saving registers and place x19+x20 at the bottom. */
    13871474    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE < 64*8);
     
    14001487    /* Save the BP and LR (ret address) registers at the top of the frame. */
    14011488    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
    1402                                                  ARMV8_A64_REG_BP, ARMV8_A64_REG_LR, ARMV8_A64_REG_SP, 10);
     1489                                                 ARMV8_A64_REG_BP,  ARMV8_A64_REG_LR, ARMV8_A64_REG_SP, 10);
    14031490    AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);
    1404     /* sub bp, sp, IEMNATIVE_FRAME_SAVE_REG_SIZE - 16 ; Set BP to point to the old BP stack address. */
    1405     AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE - 16 < 4096);
    1406     pu32CodeBuf[off++] = UINT32_C(0xd1000000) | ((IEMNATIVE_FRAME_SAVE_REG_SIZE - 16) << 10) | ARMV8_A64_REG_SP | ARMV8_A64_REG_BP;
     1491    /* add bp, sp, IEMNATIVE_FRAME_SAVE_REG_SIZE - 16 ; Set BP to point to the old BP stack address. */
     1492    pu32CodeBuf[off++] = Armv8A64MkInstrAddSub(false /*fSub*/, ARMV8_A64_REG_BP,
     1493                                               ARMV8_A64_REG_SP, IEMNATIVE_FRAME_SAVE_REG_SIZE - 16);
    14071494
    14081495    /* sub sp, sp, IEMNATIVE_FRAME_VAR_SIZE ;  Allocate the variable area from SP. */
    1409     AssertCompile(IEMNATIVE_FRAME_VAR_SIZE < 4096);
    1410     pu32CodeBuf[off++] = UINT32_C(0xd1000000) | (IEMNATIVE_FRAME_VAR_SIZE << 10)             | ARMV8_A64_REG_SP | ARMV8_A64_REG_SP;
     1496    pu32CodeBuf[off++] = Armv8A64MkInstrAddSub(true /*fSub*/, ARMV8_A64_REG_SP, ARMV8_A64_REG_SP, IEMNATIVE_FRAME_VAR_SIZE);
     1497
     1498    /* mov r28, r0  */
     1499    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_REG_FIXED_PVMCPU, IEMNATIVE_CALL_ARG0_GREG);
    14111500
    14121501#else
     
    15071596
    15081597#elif defined(RT_ARCH_ARM64)
     1598            case kIemNativeFixupType_RelImm19At5:
     1599            {
     1600                Assert(paFixups[i].off < off);
     1601                int32_t const offDisp = paLabels[paFixups[i].idxLabel].off - paFixups[i].off + paFixups[i].offAddend;
     1602                Assert(offDisp >= -262144 && offDisp < 262144);
     1603                *Ptr.pu32 = (*Ptr.pu32 & UINT32_C(0xff00001f)) | (offDisp << 5);
     1604                continue;
     1605            }
    15091606#endif
    15101607            case kIemNativeFixupType_Invalid:
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

    r101247 r101248  
    116116#ifdef RT_ARCH_AMD64
    117117# define IEMNATIVE_REG_FIXED_PVMCPU         X86_GREG_xBX
    118 #elif RT_ARCH_ARM64
     118
     119#elif defined(RT_ARCH_ARM64)
    119120# define IEMNATIVE_REG_FIXED_PVMCPU         ARMV8_A64_REG_X28
    120121/** Dedicated temporary register.
    121122 * @todo replace this by a register allocator and content tracker.  */
    122123# define IEMNATIVE_REG_FIXED_TMP0           ARMV8_A64_REG_X15
     124
    123125#else
    124126# error "port me"
    125127#endif
     128/** @} */
     129
     130/** @name Call related registers.
     131 * @{ */
     132/** @def IEMNATIVE_CALL_RET_GREG
     133 * The return value register. */
     134/** @def IEMNATIVE_CALL_ARG_GREG_COUNT
     135 * Number of arguments in registers. */
     136/** @def IEMNATIVE_CALL_ARG0_GREG
     137 * The general purpose register carrying argument \#0. */
     138/** @def IEMNATIVE_CALL_ARG1_GREG
     139 * The general purpose register carrying argument \#1. */
     140/** @def IEMNATIVE_CALL_ARG2_GREG
     141 * The general purpose register carrying argument \#2. */
     142/** @def IEMNATIVE_CALL_ARG3_GREG
     143 * The general purpose register carrying argument \#3. */
     144#ifdef RT_ARCH_AMD64
     145# define IEMNATIVE_CALL_RET_GREG             X86_GREG_xAX
     146
     147# ifdef RT_OS_WINDOWS
     148#  define IEMNATIVE_CALL_ARG_GREG_COUNT     4
     149#  define IEMNATIVE_CALL_ARG0_GREG          X86_GREG_xCX
     150#  define IEMNATIVE_CALL_ARG1_GREG          X86_GREG_xDX
     151#  define IEMNATIVE_CALL_ARG2_GREG          X86_GREG_x8
     152#  define IEMNATIVE_CALL_ARG3_GREG          X86_GREG_x9
     153# else
     154#  define IEMNATIVE_CALL_ARG_GREG_COUNT     6
     155#  define IEMNATIVE_CALL_ARG0_GREG          X86_GREG_xDI
     156#  define IEMNATIVE_CALL_ARG1_GREG          X86_GREG_xSI
     157#  define IEMNATIVE_CALL_ARG2_GREG          X86_GREG_xDX
     158#  define IEMNATIVE_CALL_ARG3_GREG          X86_GREG_xCX
     159#  define IEMNATIVE_CALL_ARG4_GREG          X86_GREG_x8
     160#  define IEMNATIVE_CALL_ARG5_GREG          X86_GREG_x9
     161# endif
     162
     163#elif defined(RT_ARCH_ARM64)
     164# define IEMNATIVE_CALL_RET_GREG            ARMV8_A64_REG_X0
     165# define IEMNATIVE_CALL_ARG_GREG_COUNT      8
     166# define IEMNATIVE_CALL_ARG0_GREG           ARMV8_A64_REG_X0
     167# define IEMNATIVE_CALL_ARG1_GREG           ARMV8_A64_REG_X1
     168# define IEMNATIVE_CALL_ARG2_GREG           ARMV8_A64_REG_X2
     169# define IEMNATIVE_CALL_ARG3_GREG           ARMV8_A64_REG_X3
     170# define IEMNATIVE_CALL_ARG4_GREG           ARMV8_A64_REG_X4
     171# define IEMNATIVE_CALL_ARG5_GREG           ARMV8_A64_REG_X5
     172# define IEMNATIVE_CALL_ARG6_GREG           ARMV8_A64_REG_X6
     173# define IEMNATIVE_CALL_ARG7_GREG           ARMV8_A64_REG_X7
     174
     175#endif
     176
    126177/** @} */
    127178
     
    158209    kIemNativeFixupType_Rel32,
    159210#elif defined(RT_ARCH_ARM64)
     211    /** ARM64 fixup: PC relative offset at bits 23:5 (CBZ, CBNZ).  */
     212    kIemNativeFixupType_RelImm19At5,
    160213#endif
    161214    kIemNativeFixupType_End
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle · Support · Privacy / Do Not Sell My Info · Terms of Use · Trademark Policy · Automated Access Etiquette