VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 99740

Last change on this file since 99740 was 99725, checked in by vboxsync, 18 months ago

VMM/EM: Nested VMX: bugref:10318 Clarify comment regarding priority of interrupt-window vs NMI delivery. The code should eventually be restructured.

1/* $Id: EM.cpp 99725 2023-05-10 13:41:39Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RemExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/vmm/cpumdis.h>
71#include <VBox/dis.h>
72#include <VBox/err.h>
73#include "VMMTracing.h"
74
75#include <iprt/asm.h>
76#include <iprt/string.h>
77#include <iprt/stream.h>
78#include <iprt/thread.h>
79
80
81/*********************************************************************************************************************************
82* Internal Functions *
83*********************************************************************************************************************************/
84static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
85static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
86#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
87static const char *emR3GetStateName(EMSTATE enmState);
88#endif
89static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
90#if defined(VBOX_WITH_REM) || defined(DEBUG)
91static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
92#endif
93static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
94
95
96/**
97 * Initializes the EM.
98 *
99 * @returns VBox status code.
100 * @param pVM The cross context VM structure.
101 */
102VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
103{
104 LogFlow(("EMR3Init\n"));
105 /*
106 * Assert alignment and sizes.
107 */
108 AssertCompileMemberAlignment(VM, em.s, 32);
109 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
110 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
111 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
112
113 /*
114 * Init the structure.
115 */
116 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
117 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
118
119 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
120#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN) && !defined(VBOX_VMM_TARGET_ARMV8)
121 true
122#else
123 false
124#endif
125 );
126 AssertLogRelRCReturn(rc, rc);
127
128 bool fEnabled;
129 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
130 AssertLogRelRCReturn(rc, rc);
131 pVM->em.s.fGuruOnTripleFault = !fEnabled;
132 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
133 {
134 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
135 pVM->em.s.fGuruOnTripleFault = true;
136 }
137
138 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
139
140 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
141 * Whether to try correlate exit history in any context, detect hot spots and
142 * try optimize these using IEM if there are other exits close by. This
143 * overrides the context specific settings. */
144 bool fExitOptimizationEnabled = true;
145 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
146 AssertLogRelRCReturn(rc, rc);
147
148 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
149 * Whether to optimize exits in ring-0. Setting this to false will also disable
150 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
151 * capabilities of the host kernel, this optimization may be unavailable. */
152 bool fExitOptimizationEnabledR0 = true;
153 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
154 AssertLogRelRCReturn(rc, rc);
155 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
156
157 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
158 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
159 * hooks are in effect). */
160 /** @todo change the default to true here */
161 bool fExitOptimizationEnabledR0PreemptDisabled = true;
162 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
163 AssertLogRelRCReturn(rc, rc);
164 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
165
166 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
167 * Maximum number of instructions to let EMHistoryExec execute in one go. */
168 uint16_t cHistoryExecMaxInstructions = 8192;
169 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
170 AssertLogRelRCReturn(rc, rc);
171 if (cHistoryExecMaxInstructions < 16)
172 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
173
174 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
175 * Maximum number of instructions between exits during probing. */
176 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
177#ifdef RT_OS_WINDOWS
178 if (VM_IS_NEM_ENABLED(pVM))
179 cHistoryProbeMaxInstructionsWithoutExit = 32;
180#endif
181 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
182 cHistoryProbeMaxInstructionsWithoutExit);
183 AssertLogRelRCReturn(rc, rc);
184 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
185 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
186 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
187
188 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
189 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
190 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
191 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
192 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
193 cHistoryProbeMinInstructions);
194 AssertLogRelRCReturn(rc, rc);
195
196 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
197 {
198 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
199 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
200 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
201 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
202 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
203 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
204 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
205 }
206
207 /*
208 * Saved state.
209 */
210 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
211 NULL, NULL, NULL,
212 NULL, emR3Save, NULL,
213 NULL, emR3Load, NULL);
214 if (RT_FAILURE(rc))
215 return rc;
216
217 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
218 {
219 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
220
221 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
222 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
223 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
224 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
225
226# define EM_REG_COUNTER(a, b, c) \
227 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
228 AssertRC(rc);
229
230# define EM_REG_COUNTER_USED(a, b, c) \
231 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
232 AssertRC(rc);
233
234# define EM_REG_PROFILE(a, b, c) \
235 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
236 AssertRC(rc);
237
238# define EM_REG_PROFILE_ADV(a, b, c) \
239 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
240 AssertRC(rc);
241
242 /*
243 * Statistics.
244 */
245#ifdef VBOX_WITH_STATISTICS
246 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
247 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
248
249 /* these should be considered for release statistics. */
250 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
251 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
252 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
253#endif
254 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
255 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
256#ifdef VBOX_WITH_STATISTICS
257 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
258 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
259 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
260#endif
261 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
262 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
263#ifdef VBOX_WITH_STATISTICS
264 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
265 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
266 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
267 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
268 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
269 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
270#endif /* VBOX_WITH_STATISTICS */
271
272 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
273 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
274 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
275 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
276 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
277
278 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
279
280 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
281 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
282 AssertRC(rc);
283
284 /* History record statistics */
285 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
286 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
287 AssertRC(rc);
288
289 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
290 {
291 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
292 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
293 AssertRC(rc);
294 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
295 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
296 AssertRC(rc);
297 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
298 "Number of replacments at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
299 AssertRC(rc);
300 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
301 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
302 AssertRC(rc);
303 }
304
305 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
306 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
307 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
308 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
309 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
310 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
311 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
312 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
313 }
314
315 emR3InitDbg(pVM);
316 return VINF_SUCCESS;
317}
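
/*
 * Note: the /EM/* keys queried above live in the VM's CFGM tree.  On a regular
 * installation they can typically be overridden from the host via extradata,
 * for example (illustrative command lines, not part of this file):
 *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/TripleFaultReset" 1
 *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/IemExecutesAll"   1
 */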
318
319
320/**
321 * Called when a VM initialization stage is completed.
322 *
323 * @returns VBox status code.
324 * @param pVM The cross context VM structure.
325 * @param enmWhat The initialization state that was completed.
326 */
327VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
328{
329 if (enmWhat == VMINITCOMPLETED_RING0)
330 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
331 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
332 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
333 return VINF_SUCCESS;
334}
335
336
337/**
338 * Applies relocations to data and code managed by this
339 * component. This function will be called at init and
340 * whenever the VMM needs to relocate itself inside the GC.
341 *
342 * @param pVM The cross context VM structure.
343 */
344VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
345{
346 LogFlow(("EMR3Relocate\n"));
347 RT_NOREF(pVM);
348}
349
350
351/**
352 * Reset the EM state for a CPU.
353 *
354 * Called by EMR3Reset and hot plugging.
355 *
356 * @param pVCpu The cross context virtual CPU structure.
357 */
358VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
359{
360 /* Reset scheduling state. */
361 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
362
363 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
364 out of the HALTED state here so that enmPrevState doesn't end up as
365 HALTED when EMR3Execute returns. */
366 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
367 {
368 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
369 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
370 }
371}
372
373
374/**
375 * Reset notification.
376 *
377 * @param pVM The cross context VM structure.
378 */
379VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
380{
381 Log(("EMR3Reset: \n"));
382 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
383 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
384}
385
386
387/**
388 * Terminates the EM.
389 *
390 * Termination means cleaning up and freeing all resources; the VM
391 * itself is at this point powered off or suspended.
392 *
393 * @returns VBox status code.
394 * @param pVM The cross context VM structure.
395 */
396VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
397{
398 RT_NOREF(pVM);
399 return VINF_SUCCESS;
400}
401
402
403/**
404 * Execute state save operation.
405 *
406 * @returns VBox status code.
407 * @param pVM The cross context VM structure.
408 * @param pSSM SSM operation handle.
409 */
410static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
411{
412 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
413 {
414 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
415
416 SSMR3PutBool(pSSM, false /*fForceRAW*/);
417
418 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
419 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
420 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
421
422 /* Save mwait state. */
423 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
424 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
425 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
426 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
427 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
428 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
429 AssertRCReturn(rc, rc);
430 }
431 return VINF_SUCCESS;
432}
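
/*
 * Saved state layout per VCPU, as written above and consumed by emR3Load():
 *     bool     fForceRAW    - always false these days, kept for compatibility
 *     uint32_t enmPrevState - previous EM state (never EMSTATE_SUSPENDED)
 *     uint32_t MWait.fWait  - monitor/mwait arming flags
 *     RTGCPTR  uMWaitRAX, uMWaitRCX, uMonitorRAX, uMonitorRCX, uMonitorRDX
 *                           - monitor/mwait operands
 */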
433
434
435/**
436 * Execute state load operation.
437 *
438 * @returns VBox status code.
439 * @param pVM The cross context VM structure.
440 * @param pSSM SSM operation handle.
441 * @param uVersion Data layout version.
442 * @param uPass The data pass.
443 */
444static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
445{
446 /*
447 * Validate version.
448 */
449 if ( uVersion > EM_SAVED_STATE_VERSION
450 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
451 {
452 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
453 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
454 }
455 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
456
457 /*
458 * Load the saved state.
459 */
460 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
461 {
462 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
463
464 bool fForceRAWIgnored;
465 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
466 AssertRCReturn(rc, rc);
467
468 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
469 {
470 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
471 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
472
473 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
474 }
475 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
476 {
477 /* Load mwait state. */
478 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
479 AssertRCReturn(rc, rc);
480 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
481 AssertRCReturn(rc, rc);
482 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
483 AssertRCReturn(rc, rc);
484 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
485 AssertRCReturn(rc, rc);
486 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
487 AssertRCReturn(rc, rc);
488 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
489 AssertRCReturn(rc, rc);
490 }
491 }
492 return VINF_SUCCESS;
493}
494
495
496/**
497 * Argument packet for emR3SetExecutionPolicy.
498 */
499struct EMR3SETEXECPOLICYARGS
500{
501 EMEXECPOLICY enmPolicy;
502 bool fEnforce;
503};
504
505
506/**
507 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
508 */
509static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
510{
511 /*
512 * Only the first CPU changes the variables.
513 */
514 if (pVCpu->idCpu == 0)
515 {
516 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
517 switch (pArgs->enmPolicy)
518 {
519 case EMEXECPOLICY_RECOMPILE_RING0:
520 case EMEXECPOLICY_RECOMPILE_RING3:
521 break;
522 case EMEXECPOLICY_IEM_ALL:
523 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
524
525 /* For making '.alliem 1' useful during debugging, transition the
526 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
527 for (VMCPUID i = 0; i < pVM->cCpus; i++)
528 {
529 PVMCPU pVCpuX = pVM->apCpusR3[i];
530 switch (pVCpuX->em.s.enmState)
531 {
532 case EMSTATE_DEBUG_GUEST_RAW:
533 case EMSTATE_DEBUG_GUEST_HM:
534 case EMSTATE_DEBUG_GUEST_NEM:
535 case EMSTATE_DEBUG_GUEST_REM:
536 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
537 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
538 break;
539 case EMSTATE_DEBUG_GUEST_IEM:
540 default:
541 break;
542 }
543 }
544 break;
545 default:
546 AssertFailedReturn(VERR_INVALID_PARAMETER);
547 }
548 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
549 }
550
551 /*
552 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
553 */
554 return pVCpu->em.s.enmState == EMSTATE_RAW
555 || pVCpu->em.s.enmState == EMSTATE_HM
556 || pVCpu->em.s.enmState == EMSTATE_NEM
557 || pVCpu->em.s.enmState == EMSTATE_IEM
558 || pVCpu->em.s.enmState == EMSTATE_REM
559 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
560 ? VINF_EM_RESCHEDULE
561 : VINF_SUCCESS;
562}
563
564
565/**
566 * Changes an execution scheduling policy parameter.
567 *
568 * This is used to enable or disable raw-mode / hardware-virtualization
569 * execution of user and supervisor code.
570 *
571 * @returns VINF_SUCCESS on success.
572 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
573 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
574 *
575 * @param pUVM The user mode VM handle.
576 * @param enmPolicy The scheduling policy to change.
577 * @param fEnforce Whether to enforce the policy or not.
578 */
579VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
580{
581 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
582 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
583 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
584
585 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
586 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
587}
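
/**
 * Usage sketch (illustrative only, no such caller in this file): forcing all
 * guest code through IEM, much as the '.alliem 1' debugger command referred to
 * in emR3SetExecutionPolicy() would do:
 * @code
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     if (rc == VINF_EM_RESCHEDULE)
 *         LogRel(("EM: IEM-all enforced; EMTs will reschedule.\n"));
 * @endcode
 */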
588
589
590/**
591 * Queries an execution scheduling policy parameter.
592 *
593 * @returns VBox status code
594 * @param pUVM The user mode VM handle.
595 * @param enmPolicy The scheduling policy to query.
596 * @param pfEnforced Where to return the current value.
597 */
598VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
599{
600 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
601 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
602 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
603 PVM pVM = pUVM->pVM;
604 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
605
606 /* No need to bother EMTs with a query. */
607 switch (enmPolicy)
608 {
609 case EMEXECPOLICY_RECOMPILE_RING0:
610 case EMEXECPOLICY_RECOMPILE_RING3:
611 *pfEnforced = false;
612 break;
613 case EMEXECPOLICY_IEM_ALL:
614 *pfEnforced = pVM->em.s.fIemExecutesAll;
615 break;
616 default:
617 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
618 }
619
620 return VINF_SUCCESS;
621}
622
623
624/**
625 * Queries the main execution engine of the VM.
626 *
627 * @returns VBox status code
628 * @param pUVM The user mode VM handle.
629 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
630 */
631VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
632{
633 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
634 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
635
636 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
637 PVM pVM = pUVM->pVM;
638 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
639
640 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
641 return VINF_SUCCESS;
642}
643
644
645/**
646 * Raise a fatal error.
647 *
648 * Safely terminate the VM with full state report and stuff. This function
649 * will naturally never return.
650 *
651 * @param pVCpu The cross context virtual CPU structure.
652 * @param rc VBox status code.
653 */
654VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
655{
656 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
657 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
658}
659
660
661#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
662/**
663 * Gets the EM state name.
664 *
665 * @returns Pointer to a read-only state name.
666 * @param enmState The state.
667 */
668static const char *emR3GetStateName(EMSTATE enmState)
669{
670 switch (enmState)
671 {
672 case EMSTATE_NONE: return "EMSTATE_NONE";
673 case EMSTATE_RAW: return "EMSTATE_RAW";
674 case EMSTATE_HM: return "EMSTATE_HM";
675 case EMSTATE_IEM: return "EMSTATE_IEM";
676 case EMSTATE_REM: return "EMSTATE_REM";
677 case EMSTATE_HALTED: return "EMSTATE_HALTED";
678 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
679 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
680 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
681 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
682 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
683 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
684 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
685 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
686 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
687 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
688 case EMSTATE_NEM: return "EMSTATE_NEM";
689 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
690 default: return "Unknown!";
691 }
692}
693#endif /* LOG_ENABLED || VBOX_STRICT */
694
695
696#if !defined(VBOX_VMM_TARGET_ARMV8)
697/**
698 * Handle pending ring-3 I/O port write.
699 *
700 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
701 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
702 *
703 * @returns Strict VBox status code.
704 * @param pVM The cross context VM structure.
705 * @param pVCpu The cross context virtual CPU structure.
706 */
707VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
708{
709 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
710
711 /* Get and clear the pending data. */
712 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
713 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
714 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
715 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
716 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
717
718 /* Assert sanity. */
719 switch (cbValue)
720 {
721 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
722 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
723 case 4: break;
724 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
725 }
726 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
727
728 /* Do the work.*/
729 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
730 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
731 if (IOM_SUCCESS(rcStrict))
732 {
733 pVCpu->cpum.GstCtx.rip += cbInstr;
734 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
735 }
736 return rcStrict;
737}
738
739
740/**
741 * Handle pending ring-3 I/O port read.
742 *
743 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
744 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
745 *
746 * @returns Strict VBox status code.
747 * @param pVM The cross context VM structure.
748 * @param pVCpu The cross context virtual CPU structure.
749 */
750VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
751{
752 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
753
754 /* Get and clear the pending data. */
755 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
756 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
757 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
758 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
759
760 /* Assert sanity. */
761 switch (cbValue)
762 {
763 case 1: break;
764 case 2: break;
765 case 4: break;
766 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
767 }
768 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
769 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
770
771 /* Do the work.*/
772 uint32_t uValue = 0;
773 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
774 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
775 if (IOM_SUCCESS(rcStrict))
776 {
777 if (cbValue == 4)
778 pVCpu->cpum.GstCtx.rax = uValue;
779 else if (cbValue == 2)
780 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
781 else
782 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
783 pVCpu->cpum.GstCtx.rip += cbInstr;
784 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
785 }
786 return rcStrict;
787}
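
/*
 * For context: the ring-0/raw-mode producer side (EMRZSetPendingIoPortRead,
 * implemented elsewhere in the VMM) essentially just records the access and
 * defers it to the handler above.  A simplified sketch, using the same
 * PendingIoPortAccess fields consumed above:
 *
 *     pVCpu->em.s.PendingIoPortAccess.uPort   = uPort;
 *     pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
 *     pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
 *     pVCpu->em.s.PendingIoPortAccess.uValue  = UINT32_C(0x52454144);  <- magic 'READ'
 *     return VINF_EM_PENDING_R3_IOPORT_READ;
 */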
788
789
790/**
791 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
792 * Worker for emR3ExecuteSplitLockInstruction}
793 */
794static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
795{
796 /* Only execute on the specified EMT. */
797 if (pVCpu == (PVMCPU)pvUser)
798 {
799 LogFunc(("\n"));
800 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
801 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
802 if (rcStrict == VINF_IEM_RAISED_XCPT)
803 rcStrict = VINF_SUCCESS;
804 return rcStrict;
805 }
806 RT_NOREF(pVM);
807 return VINF_SUCCESS;
808}
809
810
811/**
812 * Handle an instruction causing a split cacheline lock access in SMP VMs.
813 *
814 * Generally we only get here if the host has split-lock detection enabled and
815 * this caused an \#AC because of something the guest did. If we interpret the
816 * instruction as-is, we'll likely just repeat the split-lock access and
817 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
818 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
819 * rare to non-existing, we'll do a rendezvous of all EMTs and tell IEM to
820 * disregard the lock prefix when emulating the instruction.
821 *
822 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
823 * feature when entering guest context, but the support for the feature isn't a
824 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
825 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
826 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
827 * proper detection to SUPDrv later if we find it necessary.
828 *
829 * @see @bugref{10052}
830 *
831 * @returns Strict VBox status code.
832 * @param pVM The cross context VM structure.
833 * @param pVCpu The cross context virtual CPU structure.
834 */
835VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
836{
837 LogFunc(("\n"));
838 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
839}
840#endif /* VBOX_VMM_TARGET_ARMV8 */
841
842
843/**
844 * Debug loop.
845 *
846 * @returns VBox status code for EM.
847 * @param pVM The cross context VM structure.
848 * @param pVCpu The cross context virtual CPU structure.
849 * @param rc Current EM VBox status code.
850 */
851static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
852{
853 for (;;)
854 {
855 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
856 const VBOXSTRICTRC rcLast = rc;
857
858 /*
859 * Debug related RC.
860 */
861 switch (VBOXSTRICTRC_VAL(rc))
862 {
863 /*
864 * Single step an instruction.
865 */
866 case VINF_EM_DBG_STEP:
867 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
868 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
869 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
870#if !defined(VBOX_VMM_TARGET_ARMV8)
871 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
872 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
873#endif
874 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
875 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
876#ifdef VBOX_WITH_REM /** @todo fix me? */
877 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
878 rc = emR3RemStep(pVM, pVCpu);
879#endif
880 else
881 {
882 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
883 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
884 rc = VINF_EM_DBG_STEPPED;
885 }
886 break;
887
888 /*
889 * Simple events: stepped, breakpoint, stop/assertion.
890 */
891 case VINF_EM_DBG_STEPPED:
892 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
893 break;
894
895 case VINF_EM_DBG_BREAKPOINT:
896 rc = DBGFR3BpHit(pVM, pVCpu);
897 break;
898
899 case VINF_EM_DBG_STOP:
900 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
901 break;
902
903 case VINF_EM_DBG_EVENT:
904 rc = DBGFR3EventHandlePending(pVM, pVCpu);
905 break;
906
907 case VINF_EM_DBG_HYPER_STEPPED:
908 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
909 break;
910
911 case VINF_EM_DBG_HYPER_BREAKPOINT:
912 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
913 break;
914
915 case VINF_EM_DBG_HYPER_ASSERTION:
916 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
917 RTLogFlush(NULL);
918 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
919 break;
920
921 /*
922 * Guru meditation.
923 */
924 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
925 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
926 break;
927 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
928 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
929 break;
930 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
931 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
932 break;
933
934 default: /** @todo don't use default for guru, but make special errors code! */
935 {
936 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
937 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
938 break;
939 }
940 }
941
942 /*
943 * Process the result.
944 */
945 switch (VBOXSTRICTRC_VAL(rc))
946 {
947 /*
948 * Continue the debugging loop.
949 */
950 case VINF_EM_DBG_STEP:
951 case VINF_EM_DBG_STOP:
952 case VINF_EM_DBG_EVENT:
953 case VINF_EM_DBG_STEPPED:
954 case VINF_EM_DBG_BREAKPOINT:
955 case VINF_EM_DBG_HYPER_STEPPED:
956 case VINF_EM_DBG_HYPER_BREAKPOINT:
957 case VINF_EM_DBG_HYPER_ASSERTION:
958 break;
959
960 /*
961 * Resuming execution (in some form) has to be done here if we got
962 * a hypervisor debug event.
963 */
964 case VINF_SUCCESS:
965 case VINF_EM_RESUME:
966 case VINF_EM_SUSPEND:
967 case VINF_EM_RESCHEDULE:
968 case VINF_EM_RESCHEDULE_RAW:
969 case VINF_EM_RESCHEDULE_REM:
970 case VINF_EM_HALT:
971 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
972 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
973 if (rc == VINF_SUCCESS)
974 rc = VINF_EM_RESCHEDULE;
975 return rc;
976
977 /*
978 * The debugger isn't attached.
979 * We'll simply turn the thing off since that's the easiest thing to do.
980 */
981 case VERR_DBGF_NOT_ATTACHED:
982 switch (VBOXSTRICTRC_VAL(rcLast))
983 {
984 case VINF_EM_DBG_HYPER_STEPPED:
985 case VINF_EM_DBG_HYPER_BREAKPOINT:
986 case VINF_EM_DBG_HYPER_ASSERTION:
987 case VERR_TRPM_PANIC:
988 case VERR_TRPM_DONT_PANIC:
989 case VERR_VMM_RING0_ASSERTION:
990 case VERR_VMM_HYPER_CR3_MISMATCH:
991 case VERR_VMM_RING3_CALL_DISABLED:
992 return rcLast;
993 }
994 return VINF_EM_OFF;
995
996 /*
997 * Status codes terminating the VM in one or another sense.
998 */
999 case VINF_EM_TERMINATE:
1000 case VINF_EM_OFF:
1001 case VINF_EM_RESET:
1002 case VINF_EM_NO_MEMORY:
1003 case VINF_EM_RAW_STALE_SELECTOR:
1004 case VINF_EM_RAW_IRET_TRAP:
1005 case VERR_TRPM_PANIC:
1006 case VERR_TRPM_DONT_PANIC:
1007 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1008 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1009 case VERR_VMM_RING0_ASSERTION:
1010 case VERR_VMM_HYPER_CR3_MISMATCH:
1011 case VERR_VMM_RING3_CALL_DISABLED:
1012 case VERR_INTERNAL_ERROR:
1013 case VERR_INTERNAL_ERROR_2:
1014 case VERR_INTERNAL_ERROR_3:
1015 case VERR_INTERNAL_ERROR_4:
1016 case VERR_INTERNAL_ERROR_5:
1017 case VERR_IPE_UNEXPECTED_STATUS:
1018 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1019 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1020 return rc;
1021
1022 /*
1023 * The rest is unexpected, and will keep us here.
1024 */
1025 default:
1026 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1027 break;
1028 }
1029 } /* debug for ever */
1030}
1031
1032
1033#if defined(VBOX_WITH_REM) || defined(DEBUG)
1034/**
1035 * Steps recompiled code.
1036 *
1037 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1038 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1039 *
1040 * @param pVM The cross context VM structure.
1041 * @param pVCpu The cross context virtual CPU structure.
1042 */
1043static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1044{
1045#if defined(VBOX_VMM_TARGET_ARMV8)
1046 Log3(("emR3RemStep: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1047#else
1048 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1049#endif
1050
1051 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1052
1053#if defined(VBOX_VMM_TARGET_ARMV8)
1054 Log3(("emR3RemStep: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1055#else
1056 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1057#endif
1058 return rc;
1059}
1060#endif /* VBOX_WITH_REM || DEBUG */
1061
1062
1063/**
1064 * Executes recompiled code.
1065 *
1066 * This function contains the recompiler version of the inner
1067 * execution loop (the outer loop being in EMR3ExecuteVM()).
1068 *
1069 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1070 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1071 *
1072 * @param pVM The cross context VM structure.
1073 * @param pVCpu The cross context virtual CPU structure.
1074 * @param pfFFDone Where to store an indicator telling whether or not
1075 * FFs were done before returning.
1076 *
1077 */
1078static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1079{
1080#ifdef LOG_ENABLED
1081# if defined(VBOX_VMM_TARGET_ARMV8)
1082 Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1083# else
1084 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1085
1086 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1087 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1088 else
1089 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1090# endif
1091#endif
1092 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1093
1094 /*
1095 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1096 * or the REM suggests raw-mode execution.
1097 */
1098 *pfFFDone = false;
1099 uint32_t cLoops = 0;
1100 int rc = VINF_SUCCESS;
1101 for (;;)
1102 {
1103 /*
1104 * Execute REM.
1105 */
1106 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1107 {
1108 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1109 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1110 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1111 }
1112 else
1113 {
1114 /* Give up this time slice; virtual time continues */
1115 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1116 RTThreadSleep(5);
1117 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1118 rc = VINF_SUCCESS;
1119 }
1120
1121 /*
1122 * Deal with high priority post execution FFs before doing anything
1123 * else. Sync back the state and leave the lock to be on the safe side.
1124 */
1125 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1126 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1127 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1128
1129 /*
1130 * Process the returned status code.
1131 */
1132 if (rc != VINF_SUCCESS)
1133 {
1134 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1135 break;
1136 if (rc != VINF_REM_INTERRUPED_FF)
1137 {
1138 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1139 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1140 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1141 {
1142 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1143 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1144 {
1145 rc = VINF_EM_RESCHEDULE;
1146 break;
1147 }
1148 }
1149
1150 /*
1151 * Anything which is not known to us means an internal error
1152 * and the termination of the VM!
1153 */
1154 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1155 break;
1156 }
1157 }
1158
1159
1160 /*
1161 * Check and execute forced actions.
1162 *
1163 * Sync back the VM state and leave the lock before calling any of
1164 * these, you never know what's going to happen here.
1165 */
1166#ifdef VBOX_HIGH_RES_TIMERS_HACK
1167 TMTimerPollVoid(pVM, pVCpu);
1168#endif
1169 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1170 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1171 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1172 {
1173 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1174 rc = emR3ForcedActions(pVM, pVCpu, rc);
1175 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1176 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1177 if ( rc != VINF_SUCCESS
1178 && rc != VINF_EM_RESCHEDULE_REM)
1179 {
1180 *pfFFDone = true;
1181 break;
1182 }
1183 }
1184
1185 /*
1186 * Have to check if we can get back to fast execution mode every so often.
1187 */
1188 if (!(++cLoops & 7))
1189 {
1190 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1191 if ( enmCheck != EMSTATE_REM
1192 && enmCheck != EMSTATE_IEM_THEN_REM)
1193 {
1194 LogFlow(("emR3RemExecute: emR3Reschedule -> %d -> VINF_EM_RESCHEDULE\n", enmCheck));
1195 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1196 return VINF_EM_RESCHEDULE;
1197 }
1198 Log2(("emR3RemExecute: emR3Reschedule -> %d\n", enmCheck));
1199 }
1200
1201 } /* The Inner Loop, recompiled execution mode version. */
1202
1203 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1204 return rc;
1205}
1206
1207
1208#ifdef DEBUG
1209
1210int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1211{
1212 EMSTATE enmOldState = pVCpu->em.s.enmState;
1213
1214 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1215
1216 Log(("Single step BEGIN:\n"));
1217 for (uint32_t i = 0; i < cIterations; i++)
1218 {
1219 DBGFR3PrgStep(pVCpu);
1220 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1221 emR3RemStep(pVM, pVCpu);
1222 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1223 break;
1224 }
1225 Log(("Single step END:\n"));
1226#if defined(VBOX_VMM_TARGET_ARMV8)
1227 AssertReleaseFailed();
1228#else
1229 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1230#endif
1231 pVCpu->em.s.enmState = enmOldState;
1232 return VINF_EM_RESCHEDULE;
1233}
1234
1235#endif /* DEBUG */
1236
1237
1238/**
1239 * Try to execute the problematic code in IEM first, then fall back on REM if there
1240 * is too much of it or if IEM doesn't implement something.
1241 *
1242 * @returns Strict VBox status code from IEMExecLots.
1243 * @param pVM The cross context VM structure.
1244 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1245 * @param pfFFDone Force flags done indicator.
1246 *
1247 * @thread EMT(pVCpu)
1248 */
1249static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1250{
1251#if defined(VBOX_VMM_TARGET_ARMV8)
1252 LogFlow(("emR3ExecuteIemThenRem: %RGv\n", CPUMGetGuestFlatPC(pVCpu)));
1253#else
1254 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1255#endif
1256 *pfFFDone = false;
1257
1258 /*
1259 * Execute in IEM for a while.
1260 */
1261 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1262 {
1263 uint32_t cInstructions;
1264 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1265 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1266 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1267 if (rcStrict != VINF_SUCCESS)
1268 {
1269 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1270 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1271 break;
1272
1273 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1274 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1275 return rcStrict;
1276 }
1277
1278 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1279 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1280 {
1281 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1282 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1283 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1284 pVCpu->em.s.enmState = enmNewState;
1285 return VINF_SUCCESS;
1286 }
1287
1288 /*
1289 * Check for pending actions.
1290 */
1291 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1292 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1293 return VINF_SUCCESS;
1294 }
1295
1296 /*
1297 * Switch to REM.
1298 */
1299 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1300 pVCpu->em.s.enmState = EMSTATE_REM;
1301 return VINF_SUCCESS;
1302}
1303
1304
1305/**
1306 * Decides whether to execute RAW, HWACC or REM.
1307 *
1308 * @returns new EM state
1309 * @param pVM The cross context VM structure.
1310 * @param pVCpu The cross context virtual CPU structure.
1311 */
1312EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1313{
1314 /*
1315 * We stay in the wait for SIPI state unless explicitly told otherwise.
1316 */
1317 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1318 return EMSTATE_WAIT_SIPI;
1319
1320 /*
1321 * Execute everything in IEM?
1322 */
1323 if ( pVM->em.s.fIemExecutesAll
1324 || VM_IS_EXEC_ENGINE_IEM(pVM))
1325 return EMSTATE_IEM;
1326
1327#if !defined(VBOX_VMM_TARGET_ARMV8)
1328 if (VM_IS_HM_ENABLED(pVM))
1329 {
1330 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1331 return EMSTATE_HM;
1332 }
1333 else
1334#endif
1335 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1336 return EMSTATE_NEM;
1337
1338 /*
1339 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1340 * turns off monitoring features essential for raw mode!
1341 */
1342 return EMSTATE_IEM_THEN_REM;
1343}
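
/*
 * Scheduling summary, derived from emR3Reschedule() above: EMSTATE_WAIT_SIPI
 * sticks until told otherwise; fIemExecutesAll or an IEM-only execution engine
 * forces EMSTATE_IEM; otherwise HM is preferred when it can execute the
 * current guest state, then NEM; anything left over lands in
 * EMSTATE_IEM_THEN_REM.  For example, with /EM/IemExecutesAll=1 every VCPU
 * ends up in EMSTATE_IEM even on a host with working VT-x/AMD-V.
 */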
1344
1345
1346/**
1347 * Executes all high priority post execution force actions.
1348 *
1349 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1350 * fatal error status code.
1351 *
1352 * @param pVM The cross context VM structure.
1353 * @param pVCpu The cross context virtual CPU structure.
1354 * @param rc The current strict VBox status code rc.
1355 */
1356VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1357{
1358 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1359
1360 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1361 PDMCritSectBothFF(pVM, pVCpu);
1362
1363#if !defined(VBOX_VMM_TARGET_ARMV8)
1364 /* Update CR3 (Nested Paging case for HM). */
1365 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1366 {
1367 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1368 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1369 if (RT_FAILURE(rc2))
1370 return rc2;
1371 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1372 }
1373#endif
1374
1375 /* IEM has pending work (typically memory write after INS instruction). */
1376 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1377 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1378
1379 /* IOM has pending work (committing an I/O or MMIO write). */
1380 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1381 {
1382 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1383 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1384 { /* half likely, or at least it's a line shorter. */ }
1385 else if (rc == VINF_SUCCESS)
1386 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1387 else
1388 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1389 }
1390
1391 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1392 {
1393 if ( rc > VINF_EM_NO_MEMORY
1394 && rc <= VINF_EM_LAST)
1395 rc = VINF_EM_NO_MEMORY;
1396 }
1397
1398 return rc;
1399}
1400
1401
1402#if !defined(VBOX_VMM_TARGET_ARMV8)
1403/**
1404 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1405 *
1406 * @returns VBox status code.
1407 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1408 * @param pVCpu The cross context virtual CPU structure.
1409 */
1410static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1411{
1412#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1413 /* Handle the "external interrupt" VM-exit intercept. */
1414 if ( CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1415 && !CPUMIsGuestVmxExitCtlsSet(&pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1416 {
1417 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1418 AssertMsg( rcStrict != VINF_VMX_VMEXIT /* VM-exit should have been converted to VINF_SUCCESS. */
1419 && rcStrict != VINF_NO_CHANGE
1420 && rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1421 return VBOXSTRICTRC_VAL(rcStrict);
1422 }
1423#else
1424 RT_NOREF(pVCpu);
1425#endif
1426 return VINF_NO_CHANGE;
1427}
1428
1429
1430/**
1431 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1432 *
1433 * @returns VBox status code.
1434 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1435 * @param pVCpu The cross context virtual CPU structure.
1436 */
1437static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1438{
1439#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1440 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1441 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1442 {
1443 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1444 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1445 if (RT_SUCCESS(rcStrict))
1446 {
1447 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1448 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1449 return VBOXSTRICTRC_VAL(rcStrict);
1450 }
1451
1452 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1453 return VINF_EM_TRIPLE_FAULT;
1454 }
1455#else
1456 NOREF(pVCpu);
1457#endif
1458 return VINF_NO_CHANGE;
1459}
1460
1461
1462/**
1463 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1464 *
1465 * @returns VBox status code.
1466 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1467 * @param pVCpu The cross context virtual CPU structure.
1468 */
1469static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1470{
1471#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1472 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1473 {
1474 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1475 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1476 if (RT_SUCCESS(rcStrict))
1477 {
1478 Assert(rcStrict != VINF_SVM_VMEXIT);
1479 return VBOXSTRICTRC_VAL(rcStrict);
1480 }
1481 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1482 return VINF_EM_TRIPLE_FAULT;
1483 }
1484#else
1485 NOREF(pVCpu);
1486#endif
1487 return VINF_NO_CHANGE;
1488}
1489#endif
1490
1491
1492/**
1493 * Executes all pending forced actions.
1494 *
1495 * Forced actions can cause execution delays and execution
1496 * rescheduling. The first we deal with using action priority, so
1497 * that for instance pending timers aren't scheduled and ran until
1498 * right before execution. The rescheduling we deal with using
1499 * return codes. The same goes for VM termination, only in that case
1500 * we exit everything.
1501 *
1502 * @returns VBox status code of equal or greater importance/severity than rc.
1503 * The most important ones are: VINF_EM_RESCHEDULE,
1504 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1505 *
1506 * @param pVM The cross context VM structure.
1507 * @param pVCpu The cross context virtual CPU structure.
1508 * @param rc The current rc.
1509 *
1510 */
1511int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1512{
1513 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1514#ifdef VBOX_STRICT
1515 int rcIrq = VINF_SUCCESS;
1516#endif
1517 int rc2;
1518#define UPDATE_RC() \
1519 do { \
1520 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1521 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1522 break; \
1523 if (!rc || rc2 < rc) \
1524 rc = rc2; \
1525 } while (0)
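    /* Merging example for UPDATE_RC(): starting from rc = VINF_SUCCESS, an rc2
       of VINF_EM_RESCHEDULE is taken over; a later rc2 of VINF_EM_SUSPEND
       (numerically smaller, i.e. higher priority) replaces it, while any
       subsequent larger VINF_EM_* value or VINF_SUCCESS is ignored, and an
       error already in rc is sticky. */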
1526 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1527
1528 /*
1529 * Post execution chunk first.
1530 */
1531 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1532 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1533 {
1534 /*
1535 * EMT Rendezvous (must be serviced before termination).
1536 */
1537 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1538 {
1539 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1540 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1541 UPDATE_RC();
1542 /** @todo HACK ALERT! The following test is to make sure EM+TM
1543 * thinks the VM is stopped/reset before the next VM state change
1544 * is made. We need a better solution for this, or at least make it
1545 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1546 * VINF_EM_SUSPEND). */
1547 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1548 {
1549 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1550 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1551 return rc;
1552 }
1553 }
1554
1555 /*
1556 * State change request (cleared by vmR3SetStateLocked).
1557 */
1558 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1559 {
1560 VMSTATE enmState = VMR3GetState(pVM);
1561 switch (enmState)
1562 {
1563 case VMSTATE_FATAL_ERROR:
1564 case VMSTATE_FATAL_ERROR_LS:
1565 case VMSTATE_GURU_MEDITATION:
1566 case VMSTATE_GURU_MEDITATION_LS:
1567 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1568 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1569 return VINF_EM_SUSPEND;
1570
1571 case VMSTATE_DESTROYING:
1572 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1573 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1574 return VINF_EM_TERMINATE;
1575
1576 default:
1577 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1578 }
1579 }
1580
1581 /*
1582 * Debugger Facility polling.
1583 */
1584 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1585 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1586 {
1587 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1588 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1589 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1590 * somewhere before we get here, I would think. */
1591 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1592 rc = rc2;
1593 else
1594 UPDATE_RC();
1595 }
1596
1597 /*
1598 * Postponed reset request.
1599 */
1600 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1601 {
1602 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1603 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1604 UPDATE_RC();
1605 }
1606
1607 /*
1608 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1609 */
1610 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1611 {
1612 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1613 UPDATE_RC();
1614 if (rc == VINF_EM_NO_MEMORY)
1615 return rc;
1616 }
1617
1618 /* check that we got them all */
1619 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1620 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1621 }
1622
1623 /*
1624 * Normal priority then.
1625 * (Executed in no particular order.)
1626 */
1627 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1628 {
1629 /*
1630 * PDM Queues are pending.
1631 */
1632 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1633 PDMR3QueueFlushAll(pVM);
1634
1635 /*
1636 * PDM DMA transfers are pending.
1637 */
1638 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1639 PDMR3DmaRun(pVM);
1640
1641 /*
1642 * EMT Rendezvous (make sure they are handled before the requests).
1643 */
1644 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1645 {
1646 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1647 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1648 UPDATE_RC();
1649 /** @todo HACK ALERT! The following test is to make sure EM+TM
1650 * thinks the VM is stopped/reset before the next VM state change
1651 * is made. We need a better solution for this, or at least make it
1652 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1653 * VINF_EM_SUSPEND). */
1654 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1655 {
1656 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1657 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1658 return rc;
1659 }
1660 }
1661
1662 /*
1663 * Requests from other threads.
1664 */
1665 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1666 {
1667 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1668 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1669 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1670 {
1671 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1672 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1673 return rc2;
1674 }
1675 UPDATE_RC();
1676 /** @todo HACK ALERT! The following test is to make sure EM+TM
1677 * thinks the VM is stopped/reset before the next VM state change
1678 * is made. We need a better solution for this, or at least make it
1679 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1680 * VINF_EM_SUSPEND). */
1681 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1682 {
1683 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1684 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1685 return rc;
1686 }
1687 }
1688
1689 /* check that we got them all */
1690 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1691 }
1692
1693 /*
1694 * Normal priority then. (per-VCPU)
1695 * (Executed in no particular order.)
1696 */
1697 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1698 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1699 {
1700 /*
1701 * Requests from other threads.
1702 */
1703 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1704 {
1705 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1706 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1707 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1708 {
1709 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1710 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1711 return rc2;
1712 }
1713 UPDATE_RC();
1714 /** @todo HACK ALERT! The following test is to make sure EM+TM
1715 * thinks the VM is stopped/reset before the next VM state change
1716 * is made. We need a better solution for this, or at least make it
1717 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1718 * VINF_EM_SUSPEND). */
1719 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1720 {
1721 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1722 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1723 return rc;
1724 }
1725 }
1726
1727 /* check that we got them all */
1728 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1729 }
1730
1731 /*
1732 * High priority pre execution chunk last.
1733 * (Executed in ascending priority order.)
1734 */
1735 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1736 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1737 {
1738 /*
1739 * Timers before interrupts.
1740 */
1741 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1742 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1743 TMR3TimerQueuesDo(pVM);
1744
1745#if !defined(VBOX_VMM_TARGET_ARMV8)
1746 /*
1747 * Pick up asynchronously posted interrupts into the APIC.
1748 */
1749 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1750 APICUpdatePendingInterrupts(pVCpu);
1751
1752 /*
1753 * The instruction following an emulated STI should *always* be executed!
1754 *
1755 * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1756 * the eip is the same as the inhibited instr address. Before we
1757 * are able to execute this instruction in raw mode (iret to
1758 * guest code) an external interrupt might force a world switch
1759 * again. Possibly allowing a guest interrupt to be dispatched
1760 * in the process. This could break the guest. Sounds very
1761 * unlikely, but such timing-sensitive problems are not as rare as
1762 * you might think.
1763 *
1764 * Note! This used to be a force action flag. Can probably ditch this code.
1765 */
1766 if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1767 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1768 {
1769 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1770 if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1771 {
1772 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1773 Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1774 (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1775 }
1776 else
1777 Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1778 }
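        /* Illustrative guest sequence (an assumed example, not taken from any guest)
           where keeping the inhibit state matters:
               sti    ; interrupts remain inhibited for one more instruction
               hlt    ; must be reached before any pending interrupt is delivered
           If an interrupt were delivered between the two instructions, the guest
           would enter HLT afterwards and could sleep past the very wake-up event
           it was waiting for. */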
1779
1780 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1781 * delivered. */
1782
1783# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1784 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
1785 | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW))
1786 {
1787 /*
1788 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1789 * Takes priority over even SMI and INIT signals.
1790 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1791 */
1792 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1793 {
1794 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1795 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1796 UPDATE_RC();
1797 }
1798
1799 /*
1800 * APIC write emulation MAY have caused a VM-exit.
1801 * If it did cause a VM-exit, there's no point checking the other VMX non-root mode FFs here.
1802 */
1803 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
1804 {
1805 /*
1806 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1807 * Takes priority over "Traps on the previous instruction".
1808 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1809 */
1810 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1811 {
1812 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1813 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1814 UPDATE_RC();
1815 }
1816 /*
1817 * VMX Nested-guest preemption timer VM-exit.
1818 * Takes priority over NMI-window VM-exits.
1819 */
1820 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1821 {
1822 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1823 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1824 UPDATE_RC();
1825 }
1826 /*
1827 * VMX interrupt-window and NMI-window VM-exits.
1828 * Takes priority over non-maskable interrupts (NMIs) and external interrupts respectively.
1829 * If we are in an interrupt shadow or if we are already in the process of delivering
1830 * an event, then these VM-exits cannot occur.
1831 *
1832 * Interrupt shadows block NMI-window VM-exits.
1833 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1834 *
1835 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1836 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1837 * See Intel spec. 6.7 "Nonmaskable Interrupt (NMI)".
1838 */
1839 else if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1840 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)
1841 && !TRPMHasTrap(pVCpu))
1842 {
1843 /*
1844 * VMX NMI-window VM-exit.
1845 */
1846 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1847 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1848 {
1849 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1850 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1851 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1852 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1853 && rc2 != VINF_VMX_VMEXIT
1854 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1855 UPDATE_RC();
1856 }
1857 /*
1858 * VMX interrupt-window VM-exit.
1859 * This is a bit messy with the way the code below is currently structured,
1860 * but checking VMCPU_FF_INTERRUPT_NMI here (combined with CPUMAreInterruptsInhibitedByNmi
1861 * already checked at this point) should allow a pending NMI to be delivered prior to
1862 * causing an interrupt-window VM-exit.
1863 */
1864 /** @todo Restructure this later to happen after injecting NMI/causing NMI-exit, see
1865 * code in VMX R0 event delivery. */
1866 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1867 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1868 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1869 {
1870 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1871 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1872 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1873 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1874 && rc2 != VINF_VMX_VMEXIT
1875 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1876 UPDATE_RC();
1877 }
1878 }
1879 }
1880
1881 /*
1882 * Interrupt-window and NMI-window force flags might still be pending if we didn't actually cause
1883 * a VM-exit above. They will get cleared eventually when ANY nested-guest VM-exit occurs.
1884 * However, the force flags asserted below MUST have been cleared at this point.
1885 */
1886 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
1887 }
1888# endif
1889
1890 /*
1891 * Guest event injection.
1892 */
1893 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1894 bool fWakeupPending = false;
1895 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1896 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1897 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1898 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1899 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1900 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
1901 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1902 {
1903 if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1904 {
1905 bool fInVmxNonRootMode;
1906 bool fInSvmHwvirtMode;
1907 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1908 {
1909 fInVmxNonRootMode = false;
1910 fInSvmHwvirtMode = false;
1911 }
1912 else
1913 {
1914 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1915 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1916 }
1917
1918 /*
1919 * NMIs (take priority over external interrupts).
1920 */
1921 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1922 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1923 {
1924# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1925 if ( fInVmxNonRootMode
1926 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1927 {
1928 /* We MUST clear the NMI force-flag here, see @bugref{10318#c19}. */
1929 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1930 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1931 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1932 UPDATE_RC();
1933 }
1934 else
1935# endif
1936# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1937 if ( fInSvmHwvirtMode
1938 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1939 {
1940 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1941 AssertMsg( rc2 != VINF_SVM_VMEXIT
1942 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1943 UPDATE_RC();
1944 }
1945 else
1946# endif
1947 {
1948 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1949 if (rc2 == VINF_SUCCESS)
1950 {
1951 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1952 fWakeupPending = true;
1953 if (pVM->em.s.fIemExecutesAll)
1954 rc2 = VINF_EM_RESCHEDULE;
1955 else
1956 {
1957 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1958 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1959 : VINF_EM_RESCHEDULE_REM;
1960 }
1961 }
1962 UPDATE_RC();
1963 }
1964 }
1965# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1966 /** @todo NSTSVM: Handle this for SVM here too later, not when an interrupt is
1967 * actually pending like we currently do. */
1968# endif
1969 /*
1970 * External interrupts.
1971 */
1972 else
1973 {
1974 /*
1975 * VMX: virtual interrupts take priority over physical interrupts.
1976 * SVM: physical interrupts take priority over virtual interrupts.
1977 */
1978 if ( fInVmxNonRootMode
1979 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1980 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1981 {
1982 /** @todo NSTVMX: virtual-interrupt delivery. */
1983 rc2 = VINF_SUCCESS;
1984 }
1985 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1986 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1987 {
1988 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1989 if (fInVmxNonRootMode)
1990 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1991 else if (fInSvmHwvirtMode)
1992 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1993 else
1994 rc2 = VINF_NO_CHANGE;
1995
1996 if (rc2 == VINF_NO_CHANGE)
1997 {
1998 bool fInjected = false;
1999 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2000 /** @todo this really isn't nice, should properly handle this */
2001 /* Note! This can still cause a VM-exit (on Intel). */
2002 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
2003 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
2004 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
2005 fWakeupPending = true;
2006 if ( pVM->em.s.fIemExecutesAll
2007 && ( rc2 == VINF_EM_RESCHEDULE_REM
2008 || rc2 == VINF_EM_RESCHEDULE_HM
2009 || rc2 == VINF_EM_RESCHEDULE_RAW))
2010 {
2011 rc2 = VINF_EM_RESCHEDULE;
2012 }
2013# ifdef VBOX_STRICT
2014 if (fInjected)
2015 rcIrq = rc2;
2016# endif
2017 }
2018 UPDATE_RC();
2019 }
2020 else if ( fInSvmHwvirtMode
2021 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2022 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2023 {
2024 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
2025 if (rc2 == VINF_NO_CHANGE)
2026 {
2027 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2028 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
2029 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
2030 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2031 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2032 rc2 = VINF_EM_RESCHEDULE;
2033# ifdef VBOX_STRICT
2034 rcIrq = rc2;
2035# endif
2036 }
2037 UPDATE_RC();
2038 }
2039 }
2040 } /* CPUMGetGuestGif */
2041 }
2042#else
2043 bool fWakeupPending = false;
2044 //AssertReleaseFailed();
2045 /** @todo */
2046#endif
2047
2048 /*
2049 * Allocate handy pages.
2050 */
2051 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2052 {
2053 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2054 UPDATE_RC();
2055 }
2056
2057 /*
2058 * Debugger Facility request.
2059 */
2060 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2061 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2062 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2063 {
2064 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2065 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2066 UPDATE_RC();
2067 }
2068
2069 /*
2070 * EMT Rendezvous (must be serviced before termination).
2071 */
2072 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2073 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2074 {
2075 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2076 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2077 UPDATE_RC();
2078 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2079 * stopped/reset before the next VM state change is made. We need a better
2080 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2081 * && rc <= VINF_EM_SUSPEND). */
2082 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2083 {
2084 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2085 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2086 return rc;
2087 }
2088 }
2089
2090 /*
2091 * State change request (cleared by vmR3SetStateLocked).
2092 */
2093 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2094 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2095 {
2096 VMSTATE enmState = VMR3GetState(pVM);
2097 switch (enmState)
2098 {
2099 case VMSTATE_FATAL_ERROR:
2100 case VMSTATE_FATAL_ERROR_LS:
2101 case VMSTATE_GURU_MEDITATION:
2102 case VMSTATE_GURU_MEDITATION_LS:
2103 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2104 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2105 return VINF_EM_SUSPEND;
2106
2107 case VMSTATE_DESTROYING:
2108 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2109 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2110 return VINF_EM_TERMINATE;
2111
2112 default:
2113 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2114 }
2115 }
2116
2117 /*
2118 * Out of memory? Since most of our fellow high priority actions may cause us
2119 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2120 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2121 * than us since we can terminate without allocating more memory.
2122 */
2123 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2124 {
2125 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2126 UPDATE_RC();
2127 if (rc == VINF_EM_NO_MEMORY)
2128 return rc;
2129 }
2130
2131 /*
2132 * If the virtual sync clock is still stopped, make TM restart it.
2133 */
2134 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2135 TMR3VirtualSyncFF(pVM, pVCpu);
2136
2137#ifdef DEBUG
2138 /*
2139 * Debug, pause the VM.
2140 */
2141 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2142 {
2143 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2144 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2145 return VINF_EM_SUSPEND;
2146 }
2147#endif
2148
2149 /* check that we got them all */
2150 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2151#if defined(VBOX_VMM_TARGET_ARMV8)
2152 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_DBGF));
2153#else
2154 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2155#endif
2156 }
2157
2158#undef UPDATE_RC
2159 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2160 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2161 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2162 return rc;
2163}
2164
2165
2166/**
2167 * Check whether the preset execution time cap still allows guest execution to be scheduled.
2168 *
2169 * @returns true if allowed, false otherwise
2170 * @param pVM The cross context VM structure.
2171 * @param pVCpu The cross context virtual CPU structure.
2172 */
2173bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2174{
2175 uint64_t u64UserTime, u64KernelTime;
2176
2177 if ( pVM->uCpuExecutionCap != 100
2178 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2179 {
2180 uint64_t u64TimeNow = RTTimeMilliTS();
2181 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2182 {
2183 /* New time slice. */
2184 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2185 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2186 pVCpu->em.s.u64TimeSliceExec = 0;
2187 }
2188 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2189
2190 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2191 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2192 return false;
2193 }
2194 return true;
2195}
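/* A simplified standalone model of the check above (illustrative sketch only: it
   uses plain integers instead of the real VM/EM structures and assumes EM_TIME_SLICE
   is a slice length in milliseconds, e.g. 100):

       static bool emCapModelIsExecAllowed(uint64_t msNow, uint64_t msSliceStart,
                                           uint64_t msExecInSlice, uint32_t uCapPct,
                                           uint64_t msSlice)
       {
           if (uCapPct == 100)
               return true;                                    // no cap configured
           if (msSliceStart + msSlice < msNow)
               return true;                                    // new slice, budget resets
           return msExecInSlice < msSlice * uCapPct / 100;     // e.g. 50% of 100 ms = 50 ms
       }

   With uCpuExecutionCap = 50 the EMT may thus burn at most half of each time slice
   in combined kernel+user time before emR3IsExecutionAllowed() returns false and
   execution is throttled until the next slice starts. */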
2196
2197
2198/**
2199 * Execute VM.
2200 *
2201 * This function is the main loop of the VM. The emulation thread
2202 * calls this function when the VM has been successfully constructed
2203 * and we're ready for executing the VM.
2204 *
2205 * Returning from this function means that the VM is turned off or
2206 * suspended (state already saved) and deconstruction is next in line.
2207 *
2208 * All interaction from other threads is done using forced actions
2209 * and signalling of the wait object.
2210 *
2211 * @returns VBox status code, informational status codes may indicate failure.
2212 * @param pVM The cross context VM structure.
2213 * @param pVCpu The cross context virtual CPU structure.
2214 */
2215VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2216{
2217 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2218 pVM,
2219 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2220 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2221 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2222 VM_ASSERT_EMT(pVM);
2223 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2224 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2225 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2226 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2227
2228 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2229 if (rc == 0)
2230 {
2231 /*
2232 * Start the virtual time.
2233 */
2234 TMR3NotifyResume(pVM, pVCpu);
2235
2236 /*
2237 * The Outer Main Loop.
2238 */
2239 bool fFFDone = false;
2240
2241 /* Reschedule right away to start in the right state. */
2242 rc = VINF_SUCCESS;
2243
2244 /* If resuming after a pause or a state load, restore the previous
2245 state or else we'll start executing code. Else, just reschedule. */
2246 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2247 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2248 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2249 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2250 else
2251 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2252 pVCpu->em.s.cIemThenRemInstructions = 0;
2253 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2254
2255 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2256 for (;;)
2257 {
2258 /*
2259 * Before we can schedule anything (we're here because
2260 * scheduling is required) we must service any pending
2261 * forced actions to avoid any pending action causing
2262 * immediate rescheduling upon entering an inner loop.
2263 *
2264 * Do forced actions.
2265 */
2266 if ( !fFFDone
2267 && RT_SUCCESS(rc)
2268 && rc != VINF_EM_TERMINATE
2269 && rc != VINF_EM_OFF
2270 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2271 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2272 {
2273 rc = emR3ForcedActions(pVM, pVCpu, rc);
2274 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2275 }
2276 else if (fFFDone)
2277 fFFDone = false;
2278
2279#if defined(VBOX_STRICT) && !defined(VBOX_VMM_TARGET_ARMV8)
2280 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
2281#endif
2282
2283 /*
2284 * Now what to do?
2285 */
2286 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2287 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2288 switch (rc)
2289 {
2290 /*
2291 * Keep doing what we're currently doing.
2292 */
2293 case VINF_SUCCESS:
2294 break;
2295
2296 /*
2297 * Reschedule - to raw-mode execution.
2298 */
2299/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2300 case VINF_EM_RESCHEDULE_RAW:
2301 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2302 AssertLogRelFailed();
2303 pVCpu->em.s.enmState = EMSTATE_NONE;
2304 break;
2305
2306 /*
2307 * Reschedule - to HM or NEM.
2308 */
2309 case VINF_EM_RESCHEDULE_HM:
2310 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2311#if !defined(VBOX_VMM_TARGET_ARMV8)
2312 if (VM_IS_HM_ENABLED(pVM))
2313 {
2314 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
2315 {
2316 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2317 pVCpu->em.s.enmState = EMSTATE_HM;
2318 }
2319 else
2320 {
2321 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_IEM_THEN_REM)\n", enmOldState, EMSTATE_IEM_THEN_REM));
2322 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2323 }
2324 }
2325 else
2326#endif
2327 if (VM_IS_NEM_ENABLED(pVM))
2328 {
2329 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2330 pVCpu->em.s.enmState = EMSTATE_NEM;
2331 }
2332 else
2333 {
2334 AssertLogRelFailed();
2335 pVCpu->em.s.enmState = EMSTATE_NONE;
2336 }
2337 break;
2338
2339 /*
2340 * Reschedule - to recompiled execution.
2341 */
2342 case VINF_EM_RESCHEDULE_REM:
2343 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2344 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2345 enmOldState, EMSTATE_IEM_THEN_REM));
2346 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2347 {
2348 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2349 pVCpu->em.s.cIemThenRemInstructions = 0;
2350 }
2351 break;
2352
2353 /*
2354 * Resume.
2355 */
2356 case VINF_EM_RESUME:
2357 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2358 /* Don't reschedule in the halted or wait for SIPI case. */
2359 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2360 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2361 {
2362 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2363 break;
2364 }
2365 /* fall through and get scheduled. */
2366 RT_FALL_THRU();
2367
2368 /*
2369 * Reschedule.
2370 */
2371 case VINF_EM_RESCHEDULE:
2372 {
2373 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2374 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2375 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2376 pVCpu->em.s.cIemThenRemInstructions = 0;
2377 pVCpu->em.s.enmState = enmState;
2378 break;
2379 }
2380
2381 /*
2382 * Halted.
2383 */
2384 case VINF_EM_HALT:
2385 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2386 pVCpu->em.s.enmState = EMSTATE_HALTED;
2387 break;
2388
2389 /*
2390 * Switch to the wait for SIPI state (application processor only)
2391 */
2392 case VINF_EM_WAIT_SIPI:
2393 Assert(pVCpu->idCpu != 0);
2394 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2395 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2396 break;
2397
2398
2399 /*
2400 * Suspend.
2401 */
2402 case VINF_EM_SUSPEND:
2403 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2404 Assert(enmOldState != EMSTATE_SUSPENDED);
2405 pVCpu->em.s.enmPrevState = enmOldState;
2406 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2407 break;
2408
2409 /*
2410 * Reset.
2411 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2412 */
2413 case VINF_EM_RESET:
2414 {
2415 if (pVCpu->idCpu == 0)
2416 {
2417 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2418 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2419 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2420 pVCpu->em.s.cIemThenRemInstructions = 0;
2421 pVCpu->em.s.enmState = enmState;
2422 }
2423 else
2424 {
2425 /* All other VCPUs go into the wait for SIPI state. */
2426 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2427 }
2428 break;
2429 }
2430
2431 /*
2432 * Power Off.
2433 */
2434 case VINF_EM_OFF:
2435 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2436 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2437 TMR3NotifySuspend(pVM, pVCpu);
2438 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2439 return rc;
2440
2441 /*
2442 * Terminate the VM.
2443 */
2444 case VINF_EM_TERMINATE:
2445 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2446 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2447 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2448 TMR3NotifySuspend(pVM, pVCpu);
2449 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2450 return rc;
2451
2452
2453 /*
2454 * Out of memory, suspend the VM and stuff.
2455 */
2456 case VINF_EM_NO_MEMORY:
2457 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2458 Assert(enmOldState != EMSTATE_SUSPENDED);
2459 pVCpu->em.s.enmPrevState = enmOldState;
2460 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2461 TMR3NotifySuspend(pVM, pVCpu);
2462 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2463
2464 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2465 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2466 if (rc != VINF_EM_SUSPEND)
2467 {
2468 if (RT_SUCCESS_NP(rc))
2469 {
2470 AssertLogRelMsgFailed(("%Rrc\n", rc));
2471 rc = VERR_EM_INTERNAL_ERROR;
2472 }
2473 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2474 }
2475 return rc;
2476
2477 /*
2478 * Guest debug events.
2479 */
2480 case VINF_EM_DBG_STEPPED:
2481 case VINF_EM_DBG_STOP:
2482 case VINF_EM_DBG_EVENT:
2483 case VINF_EM_DBG_BREAKPOINT:
2484 case VINF_EM_DBG_STEP:
2485 if (enmOldState == EMSTATE_RAW)
2486 {
2487 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2488 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2489 }
2490 else if (enmOldState == EMSTATE_HM)
2491 {
2492 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2493 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2494 }
2495 else if (enmOldState == EMSTATE_NEM)
2496 {
2497 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2498 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2499 }
2500 else if (enmOldState == EMSTATE_REM)
2501 {
2502 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2503 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2504 }
2505 else
2506 {
2507 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2508 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2509 }
2510 break;
2511
2512 /*
2513 * Hypervisor debug events.
2514 */
2515 case VINF_EM_DBG_HYPER_STEPPED:
2516 case VINF_EM_DBG_HYPER_BREAKPOINT:
2517 case VINF_EM_DBG_HYPER_ASSERTION:
2518 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2519 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2520 break;
2521
2522 /*
2523 * Triple fault.
2524 */
2525 case VINF_EM_TRIPLE_FAULT:
2526 if (!pVM->em.s.fGuruOnTripleFault)
2527 {
2528 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2529 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2530 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2531 continue;
2532 }
2533 /* Else fall through and trigger a guru. */
2534 RT_FALL_THRU();
2535
2536 case VERR_VMM_RING0_ASSERTION:
2537 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2538 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2539 break;
2540
2541 /*
2542 * Any error code showing up here other than the ones we
2543 * know and process above is considered to be FATAL.
2544 *
2545 * Unknown warnings and informational status codes are also
2546 * included in this.
2547 */
2548 default:
2549 if (RT_SUCCESS_NP(rc))
2550 {
2551 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2552 rc = VERR_EM_INTERNAL_ERROR;
2553 }
2554 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2555 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2556 break;
2557 }
2558
2559 /*
2560 * Act on state transition.
2561 */
2562 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2563 if (enmOldState != enmNewState)
2564 {
2565 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2566
2567 /* Clear MWait flags and the unhalt FF. */
2568 if ( enmOldState == EMSTATE_HALTED
2569 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2570 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2571 && ( enmNewState == EMSTATE_RAW
2572 || enmNewState == EMSTATE_HM
2573 || enmNewState == EMSTATE_NEM
2574 || enmNewState == EMSTATE_REM
2575 || enmNewState == EMSTATE_IEM_THEN_REM
2576 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2577 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2578 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2579 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2580 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2581 {
2582 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2583 {
2584 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2585 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2586 }
2587 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2588 {
2589 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2590 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2591 }
2592 }
2593 }
2594 else
2595 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2596
2597 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2598 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2599
2600 /*
2601 * Act on the new state.
2602 */
2603 switch (enmNewState)
2604 {
2605 /*
2606 * Execute raw.
2607 */
2608 case EMSTATE_RAW:
2609 AssertLogRelMsgFailed(("%Rrc\n", rc));
2610 rc = VERR_EM_INTERNAL_ERROR;
2611 break;
2612
2613 /*
2614 * Execute hardware accelerated raw.
2615 */
2616 case EMSTATE_HM:
2617#if defined(VBOX_VMM_TARGET_ARMV8)
2618 AssertReleaseFailed(); /* Should never get here. */
2619#else
2620 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2621#endif
2622 break;
2623
2624 /*
2625 * Execute hardware accelerated using the native API (NEM).
2626 */
2627 case EMSTATE_NEM:
2628 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2629 break;
2630
2631 /*
2632 * Execute recompiled.
2633 */
2634 case EMSTATE_REM:
2635 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2636 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2637 break;
2638
2639 /*
2640 * Execute in the interpreter.
2641 */
2642 case EMSTATE_IEM:
2643 {
2644 uint32_t cInstructions = 0;
2645#if 0 /* For testing purposes. */
2646 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2647 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2648 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2649 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2650 rc = VINF_SUCCESS;
2651 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2652#endif
2653 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2654 if (pVM->em.s.fIemExecutesAll)
2655 {
2656 Assert(rc != VINF_EM_RESCHEDULE_REM);
2657 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2658 Assert(rc != VINF_EM_RESCHEDULE_HM);
2659#ifdef VBOX_HIGH_RES_TIMERS_HACK
2660 if (cInstructions < 2048)
2661 TMTimerPollVoid(pVM, pVCpu);
2662#endif
2663 }
2664 fFFDone = false;
2665 break;
2666 }
2667
2668 /*
2669 * Execute in IEM, hoping we can quickly switch back to HM
2670 * or RAW execution. If our hopes fail, we go to REM.
2671 */
2672 case EMSTATE_IEM_THEN_REM:
2673 {
2674 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2675 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2676 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2677 break;
2678 }
2679
2680 /*
2681 * Application processor execution halted until SIPI.
2682 */
2683 case EMSTATE_WAIT_SIPI:
2684 /* no break */
2685 /*
2686 * hlt - execution halted until interrupt.
2687 */
2688 case EMSTATE_HALTED:
2689 {
2690 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2691 /* If HM (or someone else) stores a pending interrupt in
2692 TRPM, it must be dispatched ASAP without any halting.
2693 Anything pending in TRPM has been accepted and the CPU
2694 should already be in the right state to receive it. */
2695 if (TRPMHasTrap(pVCpu))
2696 rc = VINF_EM_RESCHEDULE;
2697 /* MWAIT has a special extension where it's woken up when
2698 an interrupt is pending even when IF=0. */
2699 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2700 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2701 {
2702 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2703 if (rc == VINF_SUCCESS)
2704 {
2705#if defined(VBOX_VMM_TARGET_ARMV8)
2706 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
2707 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2708 {
2709 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2710 rc = VINF_EM_RESCHEDULE;
2711 }
2712#else
2713 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2714 APICUpdatePendingInterrupts(pVCpu);
2715
2716 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2717 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2718 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2719 {
2720 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2721 rc = VINF_EM_RESCHEDULE;
2722 }
2723#endif
2724 }
2725 }
2726 else
2727 {
2728#if defined(VBOX_VMM_TARGET_ARMV8)
2729 uint32_t fWaitHalted = (CPUMGetGuestIrqMasked(pVCpu) ? VMWAITHALTED_F_IGNORE_IRQS : 0)
2730 | (CPUMGetGuestFiqMasked(pVCpu) ? VMWAITHALTED_F_IGNORE_FIQS : 0);
2731#else
2732 uint32_t fWaitHalted = (CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF) ? 0 : VMWAITHALTED_F_IGNORE_IRQS;
2733#endif
2734 rc = VMR3WaitHalted(pVM, pVCpu, fWaitHalted);
2735 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2736 check VMCPU_FF_UPDATE_APIC here. */
2737 if ( rc == VINF_SUCCESS
2738 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2739 {
2740 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2741 rc = VINF_EM_RESCHEDULE;
2742 }
2743 }
2744
2745 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2746 break;
2747 }
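            /* Illustrative guest usage of the MWAIT wake-up extension handled above
               (an assumed example based on the architectural MWAIT definition): a guest
               executing
                   monitor          ; arm the address monitor
                   mov ecx, 1       ; ECX[0] = break on interrupt even if IF=0
                   mwait
               ends up with EMMWAIT_FLAG_ACTIVE and EMMWAIT_FLAG_BREAKIRQIF0 set, so the
               halt above passes fIgnoreInterrupts=false and wakes as soon as an interrupt
               becomes pending, even while interrupts are masked. */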
2748
2749 /*
2750 * Suspended - return to VM.cpp.
2751 */
2752 case EMSTATE_SUSPENDED:
2753 TMR3NotifySuspend(pVM, pVCpu);
2754 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2755 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2756 return VINF_EM_SUSPEND;
2757
2758 /*
2759 * Debugging in the guest.
2760 */
2761 case EMSTATE_DEBUG_GUEST_RAW:
2762 case EMSTATE_DEBUG_GUEST_HM:
2763 case EMSTATE_DEBUG_GUEST_NEM:
2764 case EMSTATE_DEBUG_GUEST_IEM:
2765 case EMSTATE_DEBUG_GUEST_REM:
2766 TMR3NotifySuspend(pVM, pVCpu);
2767 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2768 TMR3NotifyResume(pVM, pVCpu);
2769 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2770 break;
2771
2772 /*
2773 * Debugging in the hypervisor.
2774 */
2775 case EMSTATE_DEBUG_HYPER:
2776 {
2777 TMR3NotifySuspend(pVM, pVCpu);
2778 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2779
2780 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2781 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2782 if (rc != VINF_SUCCESS)
2783 {
2784 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2785 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2786 else
2787 {
2788 /* switch to guru meditation mode */
2789 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2790 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2791 VMMR3FatalDump(pVM, pVCpu, rc);
2792 }
2793 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2794 return rc;
2795 }
2796
2797 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2798 TMR3NotifyResume(pVM, pVCpu);
2799 break;
2800 }
2801
2802 /*
2803 * Guru meditation takes place in the debugger.
2804 */
2805 case EMSTATE_GURU_MEDITATION:
2806 {
2807 TMR3NotifySuspend(pVM, pVCpu);
2808 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2809 VMMR3FatalDump(pVM, pVCpu, rc);
2810 emR3Debug(pVM, pVCpu, rc);
2811 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2812 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2813 return rc;
2814 }
2815
2816 /*
2817 * The states we don't expect here.
2818 */
2819 case EMSTATE_NONE:
2820 case EMSTATE_TERMINATING:
2821 default:
2822 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2823 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2824 TMR3NotifySuspend(pVM, pVCpu);
2825 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2826 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2827 return VERR_EM_INTERNAL_ERROR;
2828 }
2829 } /* The Outer Main Loop */
2830 }
2831 else
2832 {
2833 /*
2834 * Fatal error.
2835 */
2836 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2837 TMR3NotifySuspend(pVM, pVCpu);
2838 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2839 VMMR3FatalDump(pVM, pVCpu, rc);
2840 emR3Debug(pVM, pVCpu, rc);
2841 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2842 /** @todo change the VM state! */
2843 return rc;
2844 }
2845
2846 /* not reached */
2847}
2848