Changeset 58938 in vbox
- Timestamp: Dec 1, 2015 2:17:45 PM (9 years ago)
- Location: trunk
- Files: 7 edited
  - include/VBox/vmm/dbgf.h (modified) (3 diffs)
  - include/VBox/vmm/hm.h (modified) (1 diff)
  - src/VBox/VMM/VMMR0/HMVMXR0.cpp (modified) (3 diffs)
  - src/VBox/VMM/VMMR3/DBGF.cpp (modified) (9 diffs)
  - src/VBox/VMM/VMMR3/HM.cpp (modified) (1 diff)
  - src/VBox/VMM/include/HMInternal.h (modified) (3 diffs)
  - src/VBox/VMM/include/HMInternal.mac (modified) (1 diff)
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
trunk/include/VBox/vmm/dbgf.h
r58932 → r58938

@@ -256,7 +256,9 @@
     DBGFEVENT_MEMORY_ROM_WRITE,
 
+    /** The first VM exit event. */
+    DBGFEVENT_EXIT_FIRST,
     /** Exit - Task switch.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_TASK_SWITCH,
+    DBGFEVENT_EXIT_TASK_SWITCH = DBGFEVENT_EXIT_FIRST,
     /** Exit - HALT instruction.
      * @todo not yet implemented. */
@@ -316,37 +318,47 @@
      * @todo not yet implemented. */
     DBGFEVENT_EXIT_VMM_CALL,
+    /** Exit - the last common event. */
+    DBGFEVENT_EXIT_LAST_COMMON = DBGFEVENT_EXIT_VMM_CALL,
+
+    /** Exit - VT-x - First. */
+    DBGFEVENT_EXIT_VMX_FIRST,
     /** Exit - VT-x VMCLEAR instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_VTX_VMCLEAR,
+    DBGFEVENT_EXIT_VMX_VMCLEAR = DBGFEVENT_EXIT_VMX_FIRST,
     /** Exit - VT-x VMLAUNCH instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_VTX_VMLAUNCH,
+    DBGFEVENT_EXIT_VMX_VMLAUNCH,
     /** Exit - VT-x VMPTRLD instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_VTX_VMPTRLD,
+    DBGFEVENT_EXIT_VMX_VMPTRLD,
     /** Exit - VT-x VMPTRST instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_VTX_VMPTRST,
+    DBGFEVENT_EXIT_VMX_VMPTRST,
     /** Exit - VT-x VMREAD instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_VTX_VMREAD,
+    DBGFEVENT_EXIT_VMX_VMREAD,
     /** Exit - VT-x VMRESUME instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_VTX_VMRESUME,
+    DBGFEVENT_EXIT_VMX_VMRESUME,
     /** Exit - VT-x VMWRITE instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_VTX_VMWRITE,
+    DBGFEVENT_EXIT_VMX_VMWRITE,
     /** Exit - VT-x VMXOFF instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_VTX_VMXOFF,
+    DBGFEVENT_EXIT_VMX_VMXOFF,
     /** Exit - VT-x VMXON instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_VTX_VMXON,
+    DBGFEVENT_EXIT_VMX_VMXON,
     /** Exit - VT-x VMFUNC instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_VTX_VMFUNC,
+    DBGFEVENT_EXIT_VMX_VMFUNC,
+    /** Exit - VT-x - Last. */
+    DBGFEVENT_EXIT_VMX_LAST = DBGFEVENT_EXIT_VMX_VMFUNC,
+
+    /** Exit - AMD-V - first */
+    DBGFEVENT_EXIT_SVM_FIRST,
     /** Exit - AMD-V VMRUN instruction.
      * @todo not yet implemented. */
-    DBGFEVENT_EXIT_SVM_VMRUN,
+    DBGFEVENT_EXIT_SVM_VMRUN = DBGFEVENT_EXIT_SVM_FIRST,
     /** Exit - AMD-V VMLOAD instruction.
      * @todo not yet implemented. */
@@ -361,4 +373,10 @@
      * @todo not yet implemented. */
     DBGFEVENT_EXIT_SVM_CLGI,
+    /** The last ADM-V VM exit event. */
+    DBGFEVENT_EXIT_SVM_LAST = DBGFEVENT_EXIT_SVM_CLGI,
+
+    /** The last VM exit event. */
+    DBGFEVENT_EXIT_LAST = DBGFEVENT_EXIT_SVM_LAST,
+
 
     /** End of valid event values. */
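The new DBGFEVENT_EXIT_FIRST / DBGFEVENT_EXIT_LAST_COMMON / *_VMX_* / *_SVM_* markers turn the VM-exit events into contiguous ranges, so callers can scan a whole range instead of naming every event. A minimal sketch of such a range scan, mirroring the loops the HM.cpp hunk below adds (the helper name anyEventEnabledInRange is made up for illustration; DBGF_IS_EVENT_ENABLED is the existing DBGF macro used there):

    /* Sketch: report whether any event in a contiguous DBGFEVENT range is enabled. */
    static bool anyEventEnabledInRange(PVM pVM, DBGFEVENTTYPE enmFirst, DBGFEVENTTYPE enmLast)
    {
        for (DBGFEVENTTYPE enmEvent = enmFirst; enmEvent <= enmLast;
             enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
            if (DBGF_IS_EVENT_ENABLED(pVM, enmEvent))
                return true;
        return false;
    }

    /* E.g. when VT-x is in use:
           fUseDebugLoop = anyEventEnabledInRange(pVM, DBGFEVENT_EXIT_VMX_FIRST, DBGFEVENT_EXIT_VMX_LAST); */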
trunk/include/VBox/vmm/hm.h
r58110 → r58938

@@ -227,4 +227,6 @@
 VMMR3_INT_DECL(void) HMR3NotifyScheduled(PVMCPU pVCpu);
 VMMR3_INT_DECL(void) HMR3NotifyEmulated(PVMCPU pVCpu);
+VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM);
+VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu);
 VMMR3_INT_DECL(bool) HMR3IsActive(PVMCPU pVCpu);
 VMMR3_INT_DECL(void) HMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode);
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r58918 → r58938

@@ -8855,5 +8855,5 @@
  * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
  */
-static VBOXSTRICTRC hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     VMXTRANSIENT VmxTransient;
@@ -8959,4 +8959,40 @@
 
 /**
+ * Checks if any expensive dtrace probes are enabled and we should go to the
+ * debug loop.
+ *
+ * @returns true if we should use debug loop, false if not.
+ */
+static bool hmR0VmxAnyExpensiveProbesEnabled(void)
+{
+    /* It's probably faster to OR the raw 32-bit counter variables together.
+       Since the variables are in an array and the probes are next to one
+       another (more or less), we have good locality.  So, better read two three
+       cache lines ever time and only have one conditional, than 20+ conditionals. */
+    return (  VBOXVMM_XCPT_DE_ENABLED_RAW()
+            | VBOXVMM_XCPT_DB_ENABLED_RAW()
+            | VBOXVMM_XCPT_BP_ENABLED_RAW()
+            | VBOXVMM_XCPT_OF_ENABLED_RAW()
+            | VBOXVMM_XCPT_BR_ENABLED_RAW()
+            | VBOXVMM_XCPT_UD_ENABLED_RAW()
+            | VBOXVMM_XCPT_NM_ENABLED_RAW()
+            | VBOXVMM_XCPT_DF_ENABLED_RAW()
+            | VBOXVMM_XCPT_TS_ENABLED_RAW()
+            | VBOXVMM_XCPT_NP_ENABLED_RAW()
+            | VBOXVMM_XCPT_SS_ENABLED_RAW()
+            | VBOXVMM_XCPT_GP_ENABLED_RAW()
+            | VBOXVMM_XCPT_PG_ENABLED_RAW()
+            | VBOXVMM_XCPT_MF_ENABLED_RAW()
+            | VBOXVMM_XCPT_AC_ENABLED_RAW()
+            | VBOXVMM_XCPT_XF_ENABLED_RAW()
+            | VBOXVMM_XCPT_VE_ENABLED_RAW()
+            | VBOXVMM_XCPT_SX_ENABLED_RAW()
+            | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
+            | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
+           ) != 0;
+}
+
+
+/**
  * Runs the guest code using VT-x.
  *
@@ -8975,8 +9011,9 @@
 
     VBOXSTRICTRC rcStrict;
-    if (!pVCpu->hm.s.fSingleInstruction)
+    if (   !pVCpu->hm.s.fUseDebugLoop
+        && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled()) )
         rcStrict = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
     else
-        rcStrict = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
+        rcStrict = hmR0VmxRunGuestCodeDebug(pVM, pVCpu, pCtx);
 
     if (rcStrict == VERR_EM_INTERPRETER)
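hmR0VmxAnyExpensiveProbesEnabled() leans on the fact that each VBOXVMM_*_ENABLED_RAW() macro expands to a plain 32-bit enable counter, so OR-ing the counters and testing the result once replaces twenty-plus individual branches. A self-contained sketch of the same idea with ordinary counters (the variable and function names here are illustrative, not the VBoxVMM probe macros):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative probe-enable counters; in VirtualBox these are the raw values
       behind the VBOXVMM_*_ENABLED_RAW() macros. */
    static uint32_t g_cProbeXcptDE, g_cProbeXcptDB, g_cProbeXcptGP, g_cProbeIntHardware;

    /* OR the counters together and branch once, instead of testing each one separately. */
    static bool anyExpensiveProbeEnabled(void)
    {
        return (  g_cProbeXcptDE
                | g_cProbeXcptDB
                | g_cProbeXcptGP
                | g_cProbeIntHardware) != 0;
    }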
trunk/src/VBox/VMM/VMMR3/DBGF.cpp
r58909 → r58938

@@ -1222,6 +1222,6 @@
 static DECLCALLBACK(VBOXSTRICTRC) dbgfR3EventConfigNotifyAllCpus(PVM pVM, PVMCPU pVCpu, void *pvUser)
 {
-    /* Don't do anything, just make sure all CPUs goes thru EM. */
-    NOREF(pVM); NOREF(pVCpu); NOREF(pvUser);
+    if (pvUser /*fIsHmEnabled*/)
+        HMR3NotifyDebugEventChangedPerCpu(pVM, pVCpu);
     return VINF_SUCCESS;
 }
@@ -1256,10 +1256,17 @@
 
     /*
-     * If this is an SMP setup, we must interrupt the other CPUs if there were
-     * changes just to make sure their execution engines are aware of them.
+     * Inform HM about changes. In an SMP setup, interrupt execution on the
+     * other CPUs so their execution loop can be reselected.
      */
     int rc = VINF_SUCCESS;
-    if (cChanges > 0 && pVM->cCpus > 1)
-        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventConfigNotifyAllCpus, NULL);
+    if (cChanges > 0)
+    {
+        bool const fIsHmEnabled = HMIsEnabled(pVM);
+        if (fIsHmEnabled)
+            HMR3NotifyDebugEventChanged(pVM);
+        if (pVM->cCpus > 1 || fIsHmEnabled)
+            rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventConfigNotifyAllCpus,
+                                    (void *)(uintptr_t)fIsHmEnabled);
+    }
     return rc;
 }
@@ -1393,5 +1400,6 @@
      * Apply the changes.
      */
-    bool fChanged;
+    bool fChanged = false;
+    bool fThis;
     for (uint32_t i = 0; i < cConfigs; i++)
     {
@@ -1401,6 +1409,6 @@
         if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
         {
-            fChanged = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
-            if (fChanged)
+            fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == false;
+            if (fThis)
             {
                 Assert(pVM->dbgf.s.cHardIntBreakpoints < 256);
@@ -1410,6 +1418,6 @@
         else if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_DISABLED)
         {
-            fChanged = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
-            if (fChanged)
+            fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmHardIntBreakpoints, paConfigs[i].iInterrupt) == true;
+            if (fThis)
             {
                 Assert(pVM->dbgf.s.cHardIntBreakpoints > 0);
@@ -1423,6 +1431,6 @@
         if (paConfigs[i].enmHardState == DBGFINTERRUPTSTATE_ENABLED)
         {
-            fChanged = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
-            if (fChanged)
+            fChanged |= fThis = ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == false;
+            if (fThis)
             {
                 Assert(pVM->dbgf.s.cSoftIntBreakpoints < 256);
@@ -1432,6 +1440,6 @@
         else if (paConfigs[i].enmSoftState == DBGFINTERRUPTSTATE_DISABLED)
         {
-            fChanged = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
-            if (fChanged)
+            fChanged |= fThis = ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSoftIntBreakpoints, paConfigs[i].iInterrupt) == true;
+            if (fThis)
             {
                 Assert(pVM->dbgf.s.cSoftIntBreakpoints > 0);
@@ -1442,7 +1450,6 @@
 
     /*
-     * Update the event bitmap entries and see if we need to notify other CPUs.
-     */
-    fChanged = false;
+     * Update the event bitmap entries.
+     */
     if (pVM->dbgf.s.cHardIntBreakpoints > 0)
         fChanged |= ASMAtomicBitTestAndSet(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_HARDWARE) == false;
@@ -1455,7 +1462,19 @@
         fChanged |= ASMAtomicBitTestAndClear(&pVM->dbgf.s.bmSelectedEvents, DBGFEVENT_INTERRUPT_SOFTWARE) == true;
 
+
+    /*
+     * Inform HM about changes. In an SMP setup, interrupt execution on the
+     * other CPUs so their execution loop can be reselected.
+     */
     int rc = VINF_SUCCESS;
-    if (fChanged && pVM->cCpus > 1)
-        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventConfigNotifyAllCpus, NULL);
+    if (fChanged)
+    {
+        bool const fIsHmEnabled = HMIsEnabled(pVM);
+        if (fIsHmEnabled)
+            HMR3NotifyDebugEventChanged(pVM);
+        if (pVM->cCpus > 1 || fIsHmEnabled)
+            rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, dbgfR3EventConfigNotifyAllCpus,
+                                    (void *)(uintptr_t)fIsHmEnabled);
+    }
     return rc;
 }
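The reworked loops hinge on atomic bit-test-and-set/clear returning the bit's previous value: the bitmap really changed only when a set found the bit clear (== false) or a clear found it set (== true), and fChanged |= fThis accumulates that over all entries so a single notification can be issued at the end. A stand-alone sketch of the pattern using C11 atomics (the helpers below are illustrative, not VirtualBox's ASMAtomicBitTestAndSet/Clear):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Atomically set bit iBit in a 32-bit-word bitmap and return the bit's previous value. */
    static bool bitTestAndSet(_Atomic uint32_t *pBitmap, unsigned iBit)
    {
        uint32_t fMask = UINT32_C(1) << (iBit & 31);
        uint32_t uOld  = atomic_fetch_or(&pBitmap[iBit / 32], fMask);
        return (uOld & fMask) != 0;
    }

    /* Change-detection pattern used above: the bit was newly set (a real change)
       exactly when its previous value was false. */
    static bool enableBreakpoint(_Atomic uint32_t *pBitmap, unsigned iInt, bool *pfChanged)
    {
        bool fThis = bitTestAndSet(pBitmap, iInt) == false;
        *pfChanged |= fThis;
        return fThis;
    }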
trunk/src/VBox/VMM/VMMR3/HM.cpp
r58909 → r58938

@@ -2754,4 +2754,64 @@
 
 /**
+ * Noticiation callback from DBGF when interrupt breakpoints or generic debug
+ * event settings changes.
+ *
+ * DBGF will call HMR3NotifyDebugEventChangedPerCpu on each CPU afterwards, this
+ * function is just updating the VM globals.
+ *
+ * @param   pVM     The VM cross context VM structure.
+ * @thread  EMT(0)
+ */
+VMMR3_INT_DECL(void) HMR3NotifyDebugEventChanged(PVM pVM)
+{
+    /* Interrupts. */
+    bool fUseDebugLoop = pVM->dbgf.ro.cSoftIntBreakpoints > 0
+                      || pVM->dbgf.ro.cHardIntBreakpoints > 0;
+
+    /* CPU Exceptions. */
+    for (DBGFEVENTTYPE enmEvent = DBGFEVENT_XCPT_FIRST;
+         !fUseDebugLoop && enmEvent <= DBGFEVENT_XCPT_LAST;
+         enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+        fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+
+    /* Common VM exits. */
+    for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_FIRST;
+         !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_LAST_COMMON;
+         enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+        fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+
+    /* Vendor specific VM exits. */
+    if (HMR3IsVmxEnabled(pVM->pUVM))
+        for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_VMX_FIRST;
+             !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_VMX_LAST;
+             enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+            fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+    else
+        for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_SVM_FIRST;
+             !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_SVM_LAST;
+             enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
+            fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
+
+    /* Done. */
+    pVM->hm.s.fUseDebugLoop = fUseDebugLoop;
+}
+
+
+/**
+ * Follow up notification callback to HMR3NotifyDebugEventChanged for each CPU.
+ *
+ * HM uses this to combine the decision made by HMR3NotifyDebugEventChanged with
+ * per CPU settings.
+ *
+ * @param   pVM     The VM cross context VM structure.
+ * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
+ */
+VMMR3_INT_DECL(void) HMR3NotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu)
+{
+    pVCpu->hm.s.fUseDebugLoop = pVCpu->hm.s.fSingleInstruction | pVM->hm.s.fUseDebugLoop;
+}
+
+
+/**
  * Notification from EM about a rescheduling into hardware assisted execution
  * mode.
trunk/src/VBox/VMM/include/HMInternal.h
r58913 → r58938

@@ -372,5 +372,8 @@
     /** Set when TPR patching is active. */
     bool                        fTPRPatchingActive;
-    bool                        u8Alignment[3];
+    /** Set when the debug facility has breakpoints/events enabled that requires
+     *  us to use the debug execution loop in ring-0. */
+    bool                        fUseDebugLoop;
+    bool                        u8Alignment[2];
 
     /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
@@ -631,4 +634,7 @@
     /** Whether we're executing a single instruction. */
     bool                        fSingleInstruction;
+    /** Whether we should use the debug loop because of single stepping or special
+     *  debug breakpoints / events are armed. */
+    bool                        fUseDebugLoop;
     /** Set if we need to clear the trap flag because of single stepping. */
     bool                        fClearTrapFlag;
@@ -647,5 +653,5 @@
     /** Whether paravirt. hypercalls are enabled. */
     bool                        fHypercallsEnabled;
-    uint8_t                     u8Alignment0[5];
+    uint8_t                     u8Alignment0[4];
 
     /** World switch exit counter. */
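Note how the explicit padding arrays shrink by one byte each ([3] → [2] and [5] → [4]): adding the one-byte fUseDebugLoop field therefore leaves the offsets of the following members, and the overall structure size, unchanged, and the assembly-side mirror in HMInternal.mac (next file) gets the matching field. A hedged sketch of how such a layout invariant can be pinned down at compile time (the structure, field names and size below are invented for illustration, not the real HM structures):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative layout: the new bool is compensated by shrinking the padding,
       so the size and the offset of fHostKernelFeatures stay put. */
    typedef struct EXAMPLESTATE
    {
        bool     fTPRPatchingActive;
        bool     fUseDebugLoop;       /* newly added field */
        bool     au8Alignment[2];     /* was [3] before fUseDebugLoop existed */
        uint32_t fHostKernelFeatures;
    } EXAMPLESTATE;

    /* Same role as VirtualBox's AssertCompile(): break the build if the layout drifts. */
    static_assert(sizeof(EXAMPLESTATE) == 8, "EXAMPLESTATE layout changed");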
trunk/src/VBox/VMM/include/HMInternal.mac
r57446 → r58938

@@ -68,4 +68,5 @@
     .fCheckedTLBFlush       resb    1
     .fSingleInstruction     resb    1
+    .fUseDebugLoop          resb    1
     .fClearTrapFlag         resb    1
     .fLeaveDone             resb    1

