Timestamp: Dec 11, 2023, 1:37:11 PM (10 months ago)
Location:  trunk/src/VBox/VMM
Files:     4 edited
  - VMMAll/IEMAllN8vePython.py (modified) (2 diffs)
  - VMMAll/IEMAllN8veRecompiler.cpp (modified) (11 diffs)
  - include/IEMMc.h (modified) (5 diffs)
  - include/IEMN8veRecompilerEmit.h (modified) (1 diff)
Legend: in the diffs below, unmodified context lines carry no prefix, added lines are prefixed with +, and removed lines with -.
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
--- VMMAll/IEMAllN8vePython.py (r102510)
+++ VMMAll/IEMAllN8vePython.py (r102569)
@@ -271,4 +271,5 @@
         # collections as we go along.
         #
 
+        def freeVariable(aoStmts, iStmt, oVarInfo, dFreedVars, dVars, fIncludeReferences = True):
             sVarName = oVarInfo.oStmt.sVarName;
@@ -368,4 +369,21 @@
                 if oVarInfo.isArg():
                     self.raiseProblem('Unused argument variable: %s' % (oVarInfo.oStmt.sVarName,));
+
+        elif oStmt.sName in ('IEM_MC_MEM_COMMIT_AND_UNMAP_RW', 'IEM_MC_MEM_COMMIT_AND_UNMAP_RO',
+                             'IEM_MC_MEM_COMMIT_AND_UNMAP_WO', 'IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO',
+                             'IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO'):
+            #
+            # The unmap info variable passed to IEM_MC_MEM_COMMIT_AND_UNMAP_RW
+            # and friends is implictly freed and we must make sure it wasn't
+            # used any later. IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO takes
+            # an additional a_u16FSW argument, which receives the same treatement.
+            #
+            for sParam in oStmt.asParams:
+                oVarInfo = dVars.get(sParam);
+                if oVarInfo:
+                    dFreedVars[sParam] = oVarInfo;
+                    del dVars[sParam];
+                else:
+                    self.raiseProblem('Variable %s was used after implictly frees by %s!' % (sParam, oStmt.sName,));
         else:
             #
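The hunk above teaches the script's variable-liveness pass that the commit/rollback-and-unmap statements consume their parameters. A compilable C++ re-sketch of that bookkeeping (the real logic is the Python above; Stmt, checkImplicitFrees, IEM_MC_SOME_LATER_USE and the toy main are made-up scaffolding):

    #include <cstdio>
    #include <set>
    #include <string>
    #include <vector>

    /* Hypothetical statement record mirroring the script's oStmt (sName + asParams). */
    struct Stmt { std::string sName; std::vector<std::string> asParams; };

    /* The MC statements that implicitly free their unmap-info (and FSW) parameters. */
    static const std::set<std::string> g_Unmappers = {
        "IEM_MC_MEM_COMMIT_AND_UNMAP_RW", "IEM_MC_MEM_COMMIT_AND_UNMAP_RO",
        "IEM_MC_MEM_COMMIT_AND_UNMAP_WO", "IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO",
        "IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO",
    };

    /* Walk the statements: move implicitly freed variables from the live set to a
       freed set, and complain if a freed variable shows up in any later statement. */
    static void checkImplicitFrees(const std::vector<Stmt> &aoStmts, std::set<std::string> dVars)
    {
        std::set<std::string> dFreedVars;
        for (const Stmt &oStmt : aoStmts)
        {
            for (const std::string &sParam : oStmt.asParams)
                if (dFreedVars.count(sParam))
                    std::printf("problem: %s used after implicit free, in %s\n", sParam.c_str(), oStmt.sName.c_str());
            if (g_Unmappers.count(oStmt.sName))
                for (const std::string &sParam : oStmt.asParams)
                    if (dVars.erase(sParam))
                        dFreedVars.insert(sParam);
        }
    }

    int main()
    {
        checkImplicitFrees({ { "IEM_MC_MEM_COMMIT_AND_UNMAP_RW", { "bUnmapInfo" } },
                             { "IEM_MC_SOME_LATER_USE",          { "bUnmapInfo" } } }, /* flagged */
                           { "bUnmapInfo" });
        return 0;
    }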
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
--- VMMAll/IEMAllN8veRecompiler.cpp (r102558)
+++ VMMAll/IEMAllN8veRecompiler.cpp (r102569)
@@ -3775,9 +3775,14 @@
  *                      been allocated as such already and won't need moving,
  *                      just freeing.
+ * @param   fKeepVars   Mask of variables that should keep their register
+ *                      assignments.  Caller must take care to handle these.
  */
 DECL_HIDDEN_THROW(uint32_t)
-iemNativeRegMoveAndFreeAndFlushAtCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs)
+iemNativeRegMoveAndFreeAndFlushAtCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs, uint32_t fKeepVars = 0)
 {
     Assert(cArgs <= IEMNATIVE_CALL_MAX_ARG_COUNT);
+
+    /* fKeepVars will reduce this mask. */
+    uint32_t fRegsToFree = IEMNATIVE_CALL_VOLATILE_GREG_MASK;
 
     /*
@@ -3811,9 +3816,14 @@
             Assert(pReNative->Core.bmVars & RT_BIT_32(idxVar));
             Assert(pReNative->Core.aVars[idxVar].idxReg == idxReg);
-            Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: idxVar=%d enmKind=%d idxReg=%d\n",
-                   idxVar, pReNative->Core.aVars[idxVar].enmKind, pReNative->Core.aVars[idxVar].idxReg));
-            if (pReNative->Core.aVars[idxVar].enmKind != kIemNativeVarKind_Stack)
-                pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
+            if (!(RT_BIT_32(idxVar) & fKeepVars))
+            {
+                Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: idxVar=%d enmKind=%d idxReg=%d\n",
+                       idxVar, pReNative->Core.aVars[idxVar].enmKind, pReNative->Core.aVars[idxVar].idxReg));
+                if (pReNative->Core.aVars[idxVar].enmKind != kIemNativeVarKind_Stack)
+                    pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
+                else
+                    off = iemNativeRegMoveOrSpillStackVar(pReNative, off, idxVar);
+            }
             else
-                off = iemNativeRegMoveOrSpillStackVar(pReNative, off, idxVar);
+                fRegsToFree &= ~RT_BIT_32(idxReg);
             continue;
@@ -3844,11 +3854,12 @@
      * Do the actual freeing.
      */
-    if (pReNative->Core.bmHstRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK)
-        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: bmHstRegs %#x -> %#x\n", pReNative->Core.bmHstRegs, pReNative->Core.bmHstRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK));
-    pReNative->Core.bmHstRegs &= ~IEMNATIVE_CALL_VOLATILE_GREG_MASK;
+    if (pReNative->Core.bmHstRegs & fRegsToFree)
+        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: bmHstRegs %#x -> %#x\n",
+               pReNative->Core.bmHstRegs, pReNative->Core.bmHstRegs & ~fRegsToFree));
+    pReNative->Core.bmHstRegs &= ~fRegsToFree;
 
     /* If there are guest register shadows in any call-volatile register, we
        have to clear the corrsponding guest register masks for each register. */
-    uint32_t fHstRegsWithGstShadow = pReNative->Core.bmHstRegsWithGstShadow & IEMNATIVE_CALL_VOLATILE_GREG_MASK;
+    uint32_t fHstRegsWithGstShadow = pReNative->Core.bmHstRegsWithGstShadow & fRegsToFree;
     if (fHstRegsWithGstShadow)
     {
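The three hunks above thread a new fKeepVars mask through iemNativeRegMoveAndFreeAndFlushAtCall: instead of unconditionally vacating every call-volatile register, the function now starts from IEMNATIVE_CALL_VOLATILE_GREG_MASK and clears the bits of registers backing variables the caller asked to keep. A standalone sketch of just that mask bookkeeping (register numbers, masks and the function name are invented for illustration; the real code also moves or spills the non-kept variables):

    #include <cstdint>
    #include <cstdio>

    /* Toy model of the fKeepVars bookkeeping: start from the call-volatile
       register mask and knock out the registers backing kept variables. */
    static uint32_t regsToFreeAtCall(uint32_t fCallVolatileGregMask,
                                     const uint8_t *paidxVarRegs, /* idxVar -> host reg, 0xff = none */
                                     uint32_t fActiveVars, uint32_t fKeepVars)
    {
        uint32_t fRegsToFree = fCallVolatileGregMask;
        for (uint32_t fVars = fActiveVars & fKeepVars; fVars != 0; fVars &= fVars - 1)
        {
            unsigned const idxVar = (unsigned)__builtin_ctz(fVars); /* GCC/Clang builtin */
            if (paidxVarRegs[idxVar] != 0xff)
                fRegsToFree &= ~(UINT32_C(1) << paidxVarRegs[idxVar]);
        }
        return fRegsToFree;
    }

    int main()
    {
        uint8_t const aidxVarRegs[2] = { 2 /* variable #0 lives in register 2 */, 0xff };
        std::printf("fRegsToFree=%#x\n", /* 0xfc7 with bit 2 cleared = 0xfc3 */
                    regsToFreeAtCall(0xfc7 /* made-up volatile mask */, aidxVarRegs,
                                     0x3 /* active vars */, 0x1 /* keep variable #0 */));
        return 0;
    }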
@@ -6289,7 +6300,8 @@
  *                      Will throw VERR_IEM_VAR_NOT_INITIALIZED if this is not
  *                      the case.
- */
-DECL_HIDDEN_THROW(uint8_t) iemNativeVarRegisterAcquire(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar,
-                                                       uint32_t *poff, bool fInitialized = false)
+ * @param   idxRegPref  Preferred register number or UINT8_MAX.
+ */
+DECL_HIDDEN_THROW(uint8_t) iemNativeVarRegisterAcquire(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint32_t *poff,
+                                                       bool fInitialized = false, uint8_t idxRegPref = UINT8_MAX)
 {
     IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
@@ -6340,5 +6352,6 @@
         Log11(("iemNativeVarRegisterAcquire: idxVar=%u idxReg=%u (matching arg %u)\n", idxVar, idxReg, uArgNo));
     }
-    else
+    else if (   idxRegPref >= RT_ELEMENTS(pReNative->Core.aHstRegs)
+             || (pReNative->Core.bmHstRegs & RT_BIT_32(idxRegPref)))
     {
         uint32_t const fNotArgsMask = ~g_afIemNativeCallRegs[RT_MIN(pReNative->cArgs, IEMNATIVE_CALL_ARG_GREG_COUNT)];
@@ -6363,4 +6376,10 @@
         Log11(("iemNativeVarRegisterAcquire: idxVar=%u idxReg=%u (slow, uArgNo=%u)\n", idxVar, idxReg, uArgNo));
         }
+    }
+    else
+    {
+        idxReg = idxRegPref;
+        iemNativeRegClearGstRegShadowing(pReNative, idxReg, *poff);
+        Log11(("iemNativeVarRegisterAcquire: idxVar=%u idxReg=%u (preferred)\n", idxVar, idxReg));
     }
     iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Var, idxVar);
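These hunks give iemNativeVarRegisterAcquire an optional idxRegPref: the preference is honoured only when it names a valid host register that is currently free; otherwise the pre-existing allocation path runs unchanged. A toy model of that decision (the 16-register assumption and all names are illustrative, not the real allocator):

    #include <cstdint>
    #include <cstdio>

    enum { kHstRegCount = 16 }; /* stand-in for RT_ELEMENTS(pReNative->Core.aHstRegs) */

    /* Toy model of the idxRegPref decision: honour the preference only if it names
       a valid host register that is currently unallocated, else fall back to the
       generic allocator (stubbed out as register 0 here). */
    static uint8_t acquireVarRegister(uint32_t &bmHstRegs, uint8_t idxRegPref)
    {
        uint8_t idxReg;
        if (   idxRegPref >= kHstRegCount
            || (bmHstRegs & (UINT32_C(1) << idxRegPref)))
            idxReg = 0; /* placeholder for the slow allocation path */
        else
            idxReg = idxRegPref; /* preference honoured */
        bmHstRegs |= UINT32_C(1) << idxReg; /* mark allocated */
        return idxReg;
    }

    int main()
    {
        uint32_t bmHstRegs = 0;
        std::printf("first:  reg %u\n", acquireVarRegister(bmHstRegs, 7)); /* -> 7 */
        std::printf("second: reg %u\n", acquireVarRegister(bmHstRegs, 7)); /* 7 busy -> fallback */
        return 0;
    }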
@@ -10145,3 +10164,5 @@
         default: AssertFailed();
     }
+#else
+    RT_NOREF(fAccess);
 #endif
@@ -10159,6 +10180,9 @@
     /*
      * Move/spill/flush stuff out of call-volatile registers.
+     *
+     * We exclude any register holding the bUnmapInfo variable, as we'll be
+     * checking it after returning from the call and will free it afterwards.
      */
     /** @todo save+restore active registers and maybe guest shadows in miss
      *        scenario. */
-    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */);
+    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */, RT_BIT_32(idxVarUnmapInfo));
@@ -10167,8 +10191,12 @@
      * If idxVarUnmapInfo is zero, we can skip all this. Otherwise we'll have
      * to call the unmap helper function.
-     */
-    //pReNative->pInstrBuf[off++] = 0xcc;
-    RT_NOREF(fAccess);
-
+     *
+     * The likelyhood of it being zero is higher than for the TLB hit when doing
+     * the mapping, as a TLB miss for an well aligned and unproblematic memory
+     * access should also end up with a mapping that won't need special unmapping.
+     */
+    /** @todo Go over iemMemMapJmp and implement the no-unmap-needed case!  That
+     *        should speed up things for the pure interpreter as well when TLBs
+     *        are enabled. */
 #ifdef RT_ARCH_AMD64
     if (pReNative->Core.aVars[idxVarUnmapInfo].idxReg == UINT8_MAX)
@@ -10185,5 +10213,6 @@
 #endif
     {
-        uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxVarUnmapInfo, &off);
+        uint8_t const idxVarReg = iemNativeVarRegisterAcquire(pReNative, idxVarUnmapInfo, &off,
+                                                              true /*fInitialized*/, IEMNATIVE_CALL_ARG1_GREG /*idxRegPref*/);
         off = iemNativeEmitTestAnyBitsInGpr8(pReNative, off, idxVarReg, 0xff);
         iemNativeVarRegisterRelease(pReNative, idxVarUnmapInfo);
@@ -10201,4 +10230,4 @@
 #endif
 
-    /* IEMNATIVE_CALL_ARG1_GREG = idxVarUnmapInfo */
+    /* IEMNATIVE_CALL_ARG1_GREG = idxVarUnmapInfo (first!) */
     off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarUnmapInfo);
@@ -10209,4 +10238,7 @@
     /* Done setting up parameters, make the call. */
     off = iemNativeEmitCallImm(pReNative, off, pfnFunction);
+
+    /* The bUnmapInfo variable is implictly free by these MCs. */
+    iemNativeVarFreeLocal(pReNative, idxVarUnmapInfo);
 
     /*
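At runtime the emitted TLB-miss tail now behaves roughly like the hand-written C++ below (a sketch of the control flow, not the actual emitted instructions): the bUnmapInfo cookie survives the register flush, gets tested, and the unmap helper is invoked only when it is non-zero; afterwards the variable is dead, matching the new iemNativeVarFreeLocal call:

    #include <cstdint>
    #include <cstdio>

    /* Hand-written equivalent of the emitted sequence: only call the (expensive)
       unmap helper when the cookie is non-zero.  The printf stands in for the
       real helper call made through pfnFunction. */
    static void commitAndUnmapTail(uint8_t bUnmapInfo)
    {
        if (bUnmapInfo != 0) /* iemNativeEmitTestAnyBitsInGpr8(..., 0xff) + conditional jump */
            std::printf("calling unmap helper, bUnmapInfo=%#x\n", bUnmapInfo);
        /* else: the mapping needed no unmap work */
        /* either way bUnmapInfo is dead here (iemNativeVarFreeLocal above) */
    }

    int main()
    {
        commitAndUnmapTail(0);    /* the expected common case per the new comment */
        commitAndUnmapTail(0x11); /* e.g. a bounce-buffered mapping: helper runs */
        return 0;
    }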
trunk/src/VBox/VMM/include/IEMMc.h
--- include/IEMMc.h (r102510)
+++ include/IEMMc.h (r102569)
@@ -2260,4 +2260,5 @@
 /** Commits the memory and unmaps guest memory previously mapped RW.
  * @remarks     May return.
+ * @note        Implictly frees the a_bMapInfo variable.
  */
 #ifndef IEM_WITH_SETJMP
@@ -2269,4 +2270,5 @@
 /** Commits the memory and unmaps guest memory previously mapped W.
  * @remarks     May return.
+ * @note        Implictly frees the a_bMapInfo variable.
  */
 #ifndef IEM_WITH_SETJMP
@@ -2278,4 +2280,5 @@
 /** Commits the memory and unmaps guest memory previously mapped R.
  * @remarks     May return.
+ * @note        Implictly frees the a_bMapInfo variable.
  */
 #ifndef IEM_WITH_SETJMP
@@ -2294,4 +2297,5 @@
  *
  * @remarks     May in theory return - for now.
+ * @note        Implictly frees both the a_bMapInfo and a_u16FSW variables.
  */
 #ifndef IEM_WITH_SETJMP
@@ -2315,5 +2319,6 @@
 #endif
 
-/** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory. */
+/** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory.
+ * @note        Implictly frees the a_bMapInfo variable. */
 #ifndef IEM_WITH_SETJMP
 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo) iemMemRollbackAndUnmap(pVCpu, a_bMapInfo)
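The added @note lines document a contract rather than new behaviour: once one of these macros has consumed a_bMapInfo (and, for the FPU-store variant, a_u16FSW), the variable must not be touched again. A toy model of that contract; iemToyCommitAndUnmap and iemToyRollbackAndUnmap are made-up stand-ins for the real pVCpu-taking helpers named in the macro bodies:

    #include <cstdint>
    #include <cstdio>

    /* Toy stand-ins for the helpers behind the macros; they only illustrate the
       documented contract that the a_bMapInfo cookie is conceptually freed by the
       call and must not be used afterwards. */
    static void iemToyCommitAndUnmap(uint8_t &bMapInfo)
    {
        std::printf("commit + unmap, cookie=%#x\n", bMapInfo);
        bMapInfo = 0xff; /* poison: variable is freed from the MC block's viewpoint */
    }

    static void iemToyRollbackAndUnmap(uint8_t &bMapInfo)
    {
        std::printf("rollback + unmap (no writes assumed), cookie=%#x\n", bMapInfo);
        bMapInfo = 0xff; /* likewise freed */
    }

    int main()
    {
        uint8_t bMapInfo1 = 0x21; /* as handed out by the mapping MC */
        iemToyCommitAndUnmap(bMapInfo1);    /* success path */
        uint8_t bMapInfo2 = 0x22;
        iemToyRollbackAndUnmap(bMapInfo2);  /* abandon/error path */
        return 0;
    }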
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h
--- include/IEMN8veRecompilerEmit.h (r102512)
+++ include/IEMN8veRecompilerEmit.h (r102569)
@@ -3215,5 +3215,8 @@
         Assert(!(RT_BIT_32(idxRegVar) & IEMNATIVE_CALL_VOLATILE_GREG_MASK));
         if (!offAddend)
-            off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegArg, idxRegVar);
+        {
+            if (idxRegArg != idxRegVar)
+                off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegArg, idxRegVar);
+        }
         else
             off = iemNativeEmitLoadGprFromGprWithAddend(pReNative, off, idxRegArg, idxRegVar, offAddend);
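The single hunk makes the argument-loading helper skip the register-to-register mov when the variable already lives in the target argument register, which the IEMNATIVE_CALL_ARG1_GREG preference added above turns into the common case. A toy emitter with the same shape (the two emitted bytes are a stand-in encoding, not real x86 emission):

    #include <cstdint>
    #include <cstdio>

    /* Toy emitter with the same shape as the fix: emit the mov only when source
       and destination differ. */
    static unsigned emitLoadGprFromGpr(uint8_t *pbBuf, unsigned off, uint8_t idxRegDst, uint8_t idxRegSrc)
    {
        if (idxRegDst != idxRegSrc)
        {
            pbBuf[off++] = 0x8b; /* stand-in for the real mov encoding */
            pbBuf[off++] = (uint8_t)(0xc0 | (idxRegDst << 3) | idxRegSrc);
        }
        return off; /* unchanged when the move would be a no-op */
    }

    int main()
    {
        uint8_t abBuf[16];
        unsigned off = emitLoadGprFromGpr(abBuf, 0,   1 /*dst*/, 2 /*src*/);
        off          = emitLoadGprFromGpr(abBuf, off, 3 /*dst*/, 3 /*src*/); /* skipped */
        std::printf("emitted %u bytes\n", off); /* 2, not 4 */
        return 0;
    }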