VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@ 96860

Last change on this file since 96860 was 96407, checked in by vboxsync, 21 months ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 46.4 KB
Line 
1/* $Id: EMAll.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/mm.h>
35#include <VBox/vmm/selm.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/iom.h>
39#include <VBox/vmm/hm.h>
40#include <VBox/vmm/pdmapi.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/stam.h>
43#include "EMInternal.h"
44#include <VBox/vmm/vmcc.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47#include <VBox/dis.h>
48#include <VBox/disopcode.h>
49#include <VBox/log.h>
50#include <iprt/assert.h>
51#include <iprt/string.h>
52
53
54
55
56/**
57 * Get the current execution manager status.
58 *
59 * @returns Current status.
60 * @param pVCpu The cross context virtual CPU structure.
61 */
62VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
63{
64 return pVCpu->em.s.enmState;
65}
66
67
68/**
69 * Sets the current execution manager status. (use only when you know what you're doing!)
70 *
71 * @param pVCpu The cross context virtual CPU structure.
72 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
73 */
74VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
75{
76 /* Only allowed combination: */
77 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
78 pVCpu->em.s.enmState = enmNewState;
79}
80
81
82/**
83 * Sets the PC for which interrupts should be inhibited.
84 *
85 * @param pVCpu The cross context virtual CPU structure.
86 * @param PC The PC.
87 */
88VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
89{
90 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
91 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
92}
93
94
95/**
96 * Gets the PC for which interrupts should be inhibited.
97 *
98 * There are a few instructions which inhibits or delays interrupts
99 * for the instruction following them. These instructions are:
100 * - STI
101 * - MOV SS, r/m16
102 * - POP SS
103 *
104 * @returns The PC for which interrupts should be inhibited.
105 * @param pVCpu The cross context virtual CPU structure.
106 *
107 */
108VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
109{
110 return pVCpu->em.s.GCPtrInhibitInterrupts;
111}
112
113
114/**
115 * Checks if interrupt inhibiting is enabled for the current instruction.
116 *
117 * @returns true if interrupts are inhibited, false if not.
118 * @param pVCpu The cross context virtual CPU structure.
119 */
120VMMDECL(bool) EMIsInhibitInterruptsActive(PVMCPU pVCpu)
121{
122 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
123 return false;
124 if (pVCpu->em.s.GCPtrInhibitInterrupts == CPUMGetGuestRIP(pVCpu))
125 return true;
126 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
127 return false;
128}
129
130
131/**
132 * Enables / disable hypercall instructions.
133 *
134 * This interface is used by GIM to tell the execution monitors whether the
135 * hypercall instruction (VMMCALL & VMCALL) are allowed or should \#UD.
136 *
137 * @param pVCpu The cross context virtual CPU structure this applies to.
138 * @param fEnabled Whether hypercall instructions are enabled (true) or not.
139 */
140VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled)
141{
142 pVCpu->em.s.fHypercallEnabled = fEnabled;
143}
144
145
146/**
147 * Checks if hypercall instructions (VMMCALL & VMCALL) are enabled or not.
148 *
149 * @returns true if enabled, false if not.
150 * @param pVCpu The cross context virtual CPU structure.
151 *
152 * @note If this call becomes a performance factor, we can make the data
153 * field available thru a read-only view in VMCPU. See VM::cpum.ro.
154 */
155VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu)
156{
157 return pVCpu->em.s.fHypercallEnabled;
158}
159
160
161/**
162 * Prepare an MWAIT - essentials of the MONITOR instruction.
163 *
164 * @returns VINF_SUCCESS
165 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
166 * @param rax The content of RAX.
167 * @param rcx The content of RCX.
168 * @param rdx The content of RDX.
169 * @param GCPhys The physical address corresponding to rax.
170 */
171VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
172{
173 pVCpu->em.s.MWait.uMonitorRAX = rax;
174 pVCpu->em.s.MWait.uMonitorRCX = rcx;
175 pVCpu->em.s.MWait.uMonitorRDX = rdx;
176 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
177 /** @todo Make use of GCPhys. */
178 NOREF(GCPhys);
179 /** @todo Complete MONITOR implementation. */
180 return VINF_SUCCESS;
181}
182
183
184/**
185 * Checks if the monitor hardware is armed / active.
186 *
187 * @returns true if armed, false otherwise.
188 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
189 */
190VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
191{
192 return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
193}
194
195
196/**
197 * Checks if we're in a MWAIT.
198 *
199 * @retval 1 if regular,
200 * @retval > 1 if MWAIT with EMMWAIT_FLAG_BREAKIRQIF0
201 * @retval 0 if not armed
202 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
203 */
204VMM_INT_DECL(unsigned) EMMonitorWaitIsActive(PVMCPU pVCpu)
205{
206 uint32_t fWait = pVCpu->em.s.MWait.fWait;
207 AssertCompile(EMMWAIT_FLAG_ACTIVE == 1);
208 AssertCompile(EMMWAIT_FLAG_BREAKIRQIF0 == 2);
209 AssertCompile((EMMWAIT_FLAG_ACTIVE << 1) == EMMWAIT_FLAG_BREAKIRQIF0);
210 return fWait & (EMMWAIT_FLAG_ACTIVE | ((fWait & EMMWAIT_FLAG_ACTIVE) << 1));
211}
212
213
214/**
215 * Performs an MWAIT.
216 *
217 * @returns VINF_SUCCESS
218 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
219 * @param rax The content of RAX.
220 * @param rcx The content of RCX.
221 */
222VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
223{
224 pVCpu->em.s.MWait.uMWaitRAX = rax;
225 pVCpu->em.s.MWait.uMWaitRCX = rcx;
226 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
227 if (rcx)
228 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
229 else
230 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
231 /** @todo not completely correct?? */
232 return VINF_EM_HALT;
233}
234
235
236/**
237 * Clears any address-range monitoring that is active.
238 *
239 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
240 */
241VMM_INT_DECL(void) EMMonitorWaitClear(PVMCPU pVCpu)
242{
243 LogFlowFunc(("Clearing MWAIT\n"));
244 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
245}
246
247
248/**
249 * Determine if we should continue execution in HM after encountering an mwait
250 * instruction.
251 *
252 * Clears MWAIT flags if returning @c true.
253 *
254 * @returns true if we should continue, false if we should halt.
255 * @param pVCpu The cross context virtual CPU structure.
256 * @param pCtx Current CPU context.
257 */
258VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
259{
260 if (CPUMGetGuestGif(pCtx))
261 {
262 if ( CPUMIsGuestPhysIntrEnabled(pVCpu)
263 || ( CPUMIsGuestInNestedHwvirtMode(pCtx)
264 && CPUMIsGuestVirtIntrEnabled(pVCpu))
265 || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
266 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
267 {
268 if (VMCPU_FF_IS_ANY_SET(pVCpu, ( VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
269 | VMCPU_FF_INTERRUPT_NESTED_GUEST)))
270 {
271 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
272 return true;
273 }
274 }
275 }
276
277 return false;
278}
279
280
281/**
282 * Determine if we should continue execution in HM after encountering a hlt
283 * instruction.
284 *
285 * @returns true if we should continue, false if we should halt.
286 * @param pVCpu The cross context virtual CPU structure.
287 * @param pCtx Current CPU context.
288 */
289VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
290{
291 if (CPUMGetGuestGif(pCtx))
292 {
293 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
294 return VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
295
296 if ( CPUMIsGuestInNestedHwvirtMode(pCtx)
297 && CPUMIsGuestVirtIntrEnabled(pVCpu))
298 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
299 }
300 return false;
301}
302
303
304/**
305 * Unhalts and wakes up the given CPU.
306 *
307 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
308 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up. If
309 * the CPU isn't currently in a halt, the next HLT instruction it executes will
310 * be affected.
311 *
312 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
313 * @param pVM The cross context VM structure.
314 * @param pVCpuDst The cross context virtual CPU structure of the
315 * CPU to unhalt and wake up. This is usually not the
316 * same as the caller.
317 * @thread EMT
318 */
319VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVMCC pVM, PVMCPUCC pVCpuDst)
320{
321 /*
322 * Flag the current(/next) HLT to unhalt immediately.
323 */
324 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);
325
326 /*
327 * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
328 * just do it here for now).
329 */
330#ifdef IN_RING0
331 /* We might be here with preemption disabled or enabled (i.e. depending on
332 thread-context hooks being used), so don't try obtaining the GVMMR0 used
333 lock here. See @bugref{7270#c148}. */
334 int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
335 AssertRC(rc);
336
337#elif defined(IN_RING3)
338 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, 0 /*fFlags*/);
339 int rc = VINF_SUCCESS;
340 RT_NOREF(pVM);
341
342#else
343 /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
344 Assert(pVM->cCpus == 1); NOREF(pVM);
345 int rc = VINF_SUCCESS;
346#endif
347 return rc;
348}
349
350#ifndef IN_RING3
351
352/**
353 * Makes an I/O port write pending for ring-3 processing.
354 *
355 * @returns VINF_EM_PENDING_R3_IOPORT_READ
356 * @param pVCpu The cross context virtual CPU structure.
357 * @param uPort The I/O port.
358 * @param cbInstr The instruction length (for RIP updating).
359 * @param cbValue The write size.
360 * @param uValue The value being written.
361 * @sa emR3ExecutePendingIoPortWrite
362 *
363 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
364 */
365VMMRZ_INT_DECL(VBOXSTRICTRC)
366EMRZSetPendingIoPortWrite(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue, uint32_t uValue)
367{
368 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
369 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
370 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
371 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
372 pVCpu->em.s.PendingIoPortAccess.uValue = uValue;
373 return VINF_EM_PENDING_R3_IOPORT_WRITE;
374}
375
376
377/**
378 * Makes an I/O port read pending for ring-3 processing.
379 *
380 * @returns VINF_EM_PENDING_R3_IOPORT_READ
381 * @param pVCpu The cross context virtual CPU structure.
382 * @param uPort The I/O port.
383 * @param cbInstr The instruction length (for RIP updating).
384 * @param cbValue The read size.
385 * @sa emR3ExecutePendingIoPortRead
386 *
387 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
388 */
389VMMRZ_INT_DECL(VBOXSTRICTRC)
390EMRZSetPendingIoPortRead(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue)
391{
392 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
393 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
394 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
395 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
396 pVCpu->em.s.PendingIoPortAccess.uValue = UINT32_C(0x52454144); /* 'READ' */
397 return VINF_EM_PENDING_R3_IOPORT_READ;
398}
399
#endif /* !IN_RING3 */
401
402
403/**
404 * Worker for EMHistoryExec that checks for ring-3 returns and flags
405 * continuation of the EMHistoryExec run there.
406 */
407DECL_FORCE_INLINE(void) emHistoryExecSetContinueExitRecIdx(PVMCPU pVCpu, VBOXSTRICTRC rcStrict, PCEMEXITREC pExitRec)
408{
409 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
410#ifdef IN_RING3
411 RT_NOREF_PV(rcStrict); RT_NOREF_PV(pExitRec);
412#else
413 switch (VBOXSTRICTRC_VAL(rcStrict))
414 {
415 case VINF_SUCCESS:
416 default:
417 break;
418
419 /*
420 * Only status codes that EMHandleRCTmpl.h will resume EMHistoryExec with.
421 */
422 case VINF_IOM_R3_IOPORT_READ: /* -> emR3ExecuteIOInstruction */
423 case VINF_IOM_R3_IOPORT_WRITE: /* -> emR3ExecuteIOInstruction */
424 case VINF_IOM_R3_IOPORT_COMMIT_WRITE: /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
425 case VINF_IOM_R3_MMIO_READ: /* -> emR3ExecuteInstruction */
426 case VINF_IOM_R3_MMIO_WRITE: /* -> emR3ExecuteInstruction */
427 case VINF_IOM_R3_MMIO_READ_WRITE: /* -> emR3ExecuteInstruction */
428 case VINF_IOM_R3_MMIO_COMMIT_WRITE: /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
429 case VINF_CPUM_R3_MSR_READ: /* -> emR3ExecuteInstruction */
430 case VINF_CPUM_R3_MSR_WRITE: /* -> emR3ExecuteInstruction */
431 case VINF_GIM_R3_HYPERCALL: /* -> emR3ExecuteInstruction */
432 pVCpu->em.s.idxContinueExitRec = (uint16_t)(pExitRec - &pVCpu->em.s.aExitRecords[0]);
433 break;
434 }
435#endif /* !IN_RING3 */
436}
437
438
439/**
440 * Execute using history.
441 *
442 * This function will be called when EMHistoryAddExit() and friends returns a
443 * non-NULL result. This happens in response to probing or when probing has
444 * uncovered adjacent exits which can more effectively be reached by using IEM
445 * than restarting execution using the main execution engine and fielding an
446 * regular exit.
447 *
448 * @returns VBox strict status code, see IEMExecForExits.
449 * @param pVCpu The cross context virtual CPU structure.
450 * @param pExitRec The exit record return by a previous history add
451 * or update call.
452 * @param fWillExit Flags indicating to IEM what will cause exits, TBD.
453 */
454VMM_INT_DECL(VBOXSTRICTRC) EMHistoryExec(PVMCPUCC pVCpu, PCEMEXITREC pExitRec, uint32_t fWillExit)
455{
456 Assert(pExitRec);
457 VMCPU_ASSERT_EMT(pVCpu);
458 IEMEXECFOREXITSTATS ExecStats;
459 switch (pExitRec->enmAction)
460 {
461 /*
462 * Executes multiple instruction stopping only when we've gone a given
463 * number without perceived exits.
464 */
465 case EMEXITACTION_EXEC_WITH_MAX:
466 {
467 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryExec, a);
468 LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %RX64, max %u\n", pExitRec->uFlatPC, pExitRec->cMaxInstructionsWithoutExit));
469 VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
470 pExitRec->cMaxInstructionsWithoutExit /* cMinInstructions*/,
471 pVCpu->em.s.cHistoryExecMaxInstructions,
472 pExitRec->cMaxInstructionsWithoutExit,
473 &ExecStats);
474 LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
475 VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
476 emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRec);
477
478 /* Ignore instructions IEM doesn't know about. */
479 if ( ( rcStrict != VERR_IEM_INSTR_NOT_IMPLEMENTED
480 && rcStrict != VERR_IEM_ASPECT_NOT_IMPLEMENTED)
481 || ExecStats.cInstructions == 0)
482 { /* likely */ }
483 else
484 rcStrict = VINF_SUCCESS;
485
486 if (ExecStats.cExits > 1)
487 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecSavedExits, ExecStats.cExits - 1);
488 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecInstructions, ExecStats.cInstructions);
489 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryExec, a);
490 return rcStrict;
491 }
492
493 /*
494 * Probe a exit for close by exits.
495 */
496 case EMEXITACTION_EXEC_PROBE:
497 {
498 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryProbe, b);
499 LogFlow(("EMHistoryExec/EXEC_PROBE: %RX64\n", pExitRec->uFlatPC));
500 PEMEXITREC pExitRecUnconst = (PEMEXITREC)pExitRec;
501 VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
502 pVCpu->em.s.cHistoryProbeMinInstructions,
503 pVCpu->em.s.cHistoryExecMaxInstructions,
504 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit,
505 &ExecStats);
506 LogFlow(("EMHistoryExec/EXEC_PROBE: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
507 VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
508 emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRecUnconst);
509 if ( ExecStats.cExits >= 2
510 && RT_SUCCESS(rcStrict))
511 {
512 Assert(ExecStats.cMaxExitDistance > 0 && ExecStats.cMaxExitDistance <= 32);
513 pExitRecUnconst->cMaxInstructionsWithoutExit = ExecStats.cMaxExitDistance;
514 pExitRecUnconst->enmAction = EMEXITACTION_EXEC_WITH_MAX;
515 LogFlow(("EMHistoryExec/EXEC_PROBE: -> EXEC_WITH_MAX %u\n", ExecStats.cMaxExitDistance));
516 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedExecWithMax);
517 }
518#ifndef IN_RING3
519 else if ( pVCpu->em.s.idxContinueExitRec != UINT16_MAX
520 && RT_SUCCESS(rcStrict))
521 {
522 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedToRing3);
523 LogFlow(("EMHistoryExec/EXEC_PROBE: -> ring-3\n"));
524 }
525#endif
526 else
527 {
528 pExitRecUnconst->enmAction = EMEXITACTION_NORMAL_PROBED;
529 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
530 LogFlow(("EMHistoryExec/EXEC_PROBE: -> PROBED\n"));
531 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedNormal);
532 if ( rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED
533 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
534 rcStrict = VINF_SUCCESS;
535 }
536 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryProbeInstructions, ExecStats.cInstructions);
537 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryProbe, b);
538 return rcStrict;
539 }
540
541 /* We shouldn't ever see these here! */
542 case EMEXITACTION_FREE_RECORD:
543 case EMEXITACTION_NORMAL:
544 case EMEXITACTION_NORMAL_PROBED:
545 break;
546
547 /* No default case, want compiler warnings. */
548 }
549 AssertLogRelFailedReturn(VERR_EM_INTERNAL_ERROR);
550}
551
552
553/**
554 * Worker for emHistoryAddOrUpdateRecord.
555 */
556DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInit(PEMEXITREC pExitRec, uint64_t uFlatPC, uint32_t uFlagsAndType, uint64_t uExitNo)
557{
558 pExitRec->uFlatPC = uFlatPC;
559 pExitRec->uFlagsAndType = uFlagsAndType;
560 pExitRec->enmAction = EMEXITACTION_NORMAL;
561 pExitRec->bUnused = 0;
562 pExitRec->cMaxInstructionsWithoutExit = 64;
563 pExitRec->uLastExitNo = uExitNo;
564 pExitRec->cHits = 1;
565 return NULL;
566}
567
568
569/**
570 * Worker for emHistoryAddOrUpdateRecord.
571 */
572DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitNew(PVMCPU pVCpu, PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
573 PEMEXITREC pExitRec, uint64_t uFlatPC,
574 uint32_t uFlagsAndType, uint64_t uExitNo)
575{
576 pHistEntry->idxSlot = (uint32_t)idxSlot;
577 pVCpu->em.s.cExitRecordUsed++;
578 LogFlow(("emHistoryRecordInitNew: [%#x] = %#07x %016RX64; (%u of %u used)\n", idxSlot, uFlagsAndType, uFlatPC,
579 pVCpu->em.s.cExitRecordUsed, RT_ELEMENTS(pVCpu->em.s.aExitRecords) ));
580 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
581}
582
583
584/**
585 * Worker for emHistoryAddOrUpdateRecord.
586 */
587DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitReplacement(PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
588 PEMEXITREC pExitRec, uint64_t uFlatPC,
589 uint32_t uFlagsAndType, uint64_t uExitNo)
590{
591 pHistEntry->idxSlot = (uint32_t)idxSlot;
592 LogFlow(("emHistoryRecordInitReplacement: [%#x] = %#07x %016RX64 replacing %#07x %016RX64 with %u hits, %u exits old\n",
593 idxSlot, uFlagsAndType, uFlatPC, pExitRec->uFlagsAndType, pExitRec->uFlatPC, pExitRec->cHits,
594 uExitNo - pExitRec->uLastExitNo));
595 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
596}
597
598
599/**
600 * Adds or updates the EMEXITREC for this PC/type and decide on an action.
601 *
602 * @returns Pointer to an exit record if special action should be taken using
603 * EMHistoryExec(). Take normal exit action when NULL.
604 *
605 * @param pVCpu The cross context virtual CPU structure.
606 * @param uFlagsAndType Combined flags and type, EMEXIT_F_KIND_EM set and
607 * both EMEXIT_F_CS_EIP and EMEXIT_F_UNFLATTENED_PC are clear.
608 * @param uFlatPC The flattened program counter.
609 * @param pHistEntry The exit history entry.
610 * @param uExitNo The current exit number.
611 */
612static PCEMEXITREC emHistoryAddOrUpdateRecord(PVMCPU pVCpu, uint64_t uFlagsAndType, uint64_t uFlatPC,
613 PEMEXITENTRY pHistEntry, uint64_t uExitNo)
614{
615# ifdef IN_RING0
616 /* Disregard the hm flag. */
617 uFlagsAndType &= ~EMEXIT_F_HM;
618# endif
619
620 /*
621 * Work the hash table.
622 */
623 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitRecords) == 1024);
624# define EM_EXIT_RECORDS_IDX_MASK 0x3ff
625 uintptr_t idxSlot = ((uintptr_t)uFlatPC >> 1) & EM_EXIT_RECORDS_IDX_MASK;
626 PEMEXITREC pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
627 if (pExitRec->uFlatPC == uFlatPC)
628 {
629 Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
630 pHistEntry->idxSlot = (uint32_t)idxSlot;
631 if (pExitRec->uFlagsAndType == uFlagsAndType)
632 {
633 pExitRec->uLastExitNo = uExitNo;
634 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[0]);
635 }
636 else
637 {
638 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[0]);
639 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
640 }
641 }
642 else if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
643 {
644 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[0]);
645 return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
646 }
647 else
648 {
649 /*
650 * Collision. We calculate a new hash for stepping away from the first,
651 * doing up to 8 steps away before replacing the least recently used record.
652 */
653 uintptr_t idxOldest = idxSlot;
654 uint64_t uOldestExitNo = pExitRec->uLastExitNo;
655 unsigned iOldestStep = 0;
656 unsigned iStep = 1;
657 uintptr_t const idxAdd = (uintptr_t)(uFlatPC >> 11) & (EM_EXIT_RECORDS_IDX_MASK / 4);
658 for (;;)
659 {
660 Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
661 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecNew) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
662 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
663 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecTypeChanged) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
664
665 /* Step to the next slot. */
666 idxSlot += idxAdd;
667 idxSlot &= EM_EXIT_RECORDS_IDX_MASK;
668 pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
669
670 /* Does it match? */
671 if (pExitRec->uFlatPC == uFlatPC)
672 {
673 Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
674 pHistEntry->idxSlot = (uint32_t)idxSlot;
675 if (pExitRec->uFlagsAndType == uFlagsAndType)
676 {
677 pExitRec->uLastExitNo = uExitNo;
678 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[iStep]);
679 break;
680 }
681 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[iStep]);
682 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
683 }
684
685 /* Is it free? */
686 if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
687 {
688 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[iStep]);
689 return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
690 }
691
692 /* Is it the least recently used one? */
693 if (pExitRec->uLastExitNo < uOldestExitNo)
694 {
695 uOldestExitNo = pExitRec->uLastExitNo;
696 idxOldest = idxSlot;
697 iOldestStep = iStep;
698 }
699
700 /* Next iteration? */
701 iStep++;
702 Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced));
703 if (RT_LIKELY(iStep < 8 + 1))
704 { /* likely */ }
705 else
706 {
707 /* Replace the least recently used slot. */
708 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecReplaced[iOldestStep]);
709 pExitRec = &pVCpu->em.s.aExitRecords[idxOldest];
710 return emHistoryRecordInitReplacement(pHistEntry, idxOldest, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
711 }
712 }
713 }
714
715 /*
716 * Found an existing record.
717 */
718 switch (pExitRec->enmAction)
719 {
720 case EMEXITACTION_NORMAL:
721 {
722 uint64_t const cHits = ++pExitRec->cHits;
723 if (cHits < 256)
724 return NULL;
725 LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> EXEC_PROBE\n", idxSlot, uFlagsAndType, uFlatPC));
726 pExitRec->enmAction = EMEXITACTION_EXEC_PROBE;
727 return pExitRec;
728 }
729
730 case EMEXITACTION_NORMAL_PROBED:
731 pExitRec->cHits += 1;
732 return NULL;
733
734 default:
735 pExitRec->cHits += 1;
736 return pExitRec;
737
738 /* This will happen if the caller ignores or cannot serve the probe
739 request (forced to ring-3, whatever). We retry this 256 times. */
740 case EMEXITACTION_EXEC_PROBE:
741 {
742 uint64_t const cHits = ++pExitRec->cHits;
743 if (cHits < 512)
744 return pExitRec;
745 pExitRec->enmAction = EMEXITACTION_NORMAL_PROBED;
746 LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> PROBED\n", idxSlot, uFlagsAndType, uFlatPC));
747 return NULL;
748 }
749 }
750}
751
752
753/**
754 * Adds an exit to the history for this CPU.
755 *
756 * @returns Pointer to an exit record if special action should be taken using
757 * EMHistoryExec(). Take normal exit action when NULL.
758 *
759 * @param pVCpu The cross context virtual CPU structure.
760 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FT).
761 * @param uFlatPC The flattened program counter (RIP). UINT64_MAX if not available.
762 * @param uTimestamp The TSC value for the exit, 0 if not available.
763 * @thread EMT(pVCpu)
764 */
765VMM_INT_DECL(PCEMEXITREC) EMHistoryAddExit(PVMCPUCC pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC, uint64_t uTimestamp)
766{
767 VMCPU_ASSERT_EMT(pVCpu);
768
769 /*
770 * Add the exit history entry.
771 */
772 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
773 uint64_t uExitNo = pVCpu->em.s.iNextExit++;
774 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
775 pHistEntry->uFlatPC = uFlatPC;
776 pHistEntry->uTimestamp = uTimestamp;
777 pHistEntry->uFlagsAndType = uFlagsAndType;
778 pHistEntry->idxSlot = UINT32_MAX;
779
780 /*
781 * If common exit type, we will insert/update the exit into the exit record hash table.
782 */
783 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
784#ifdef IN_RING0
785 && pVCpu->em.s.fExitOptimizationEnabledR0
786 && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
787#else
788 && pVCpu->em.s.fExitOptimizationEnabled
789#endif
790 && uFlatPC != UINT64_MAX
791 )
792 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
793 return NULL;
794}
795
796
797/**
798 * Interface that VT-x uses to supply the PC of an exit when CS:RIP is being read.
799 *
800 * @param pVCpu The cross context virtual CPU structure.
801 * @param uFlatPC The flattened program counter (RIP).
802 * @param fFlattened Set if RIP was subjected to CS.BASE, clear if not.
803 */
804VMM_INT_DECL(void) EMHistoryUpdatePC(PVMCPUCC pVCpu, uint64_t uFlatPC, bool fFlattened)
805{
806 VMCPU_ASSERT_EMT(pVCpu);
807
808 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
809 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
810 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
811 pHistEntry->uFlatPC = uFlatPC;
812 if (fFlattened)
813 pHistEntry->uFlagsAndType &= ~EMEXIT_F_UNFLATTENED_PC;
814 else
815 pHistEntry->uFlagsAndType |= EMEXIT_F_UNFLATTENED_PC;
816}
817
818
819/**
820 * Interface for convering a engine specific exit to a generic one and get guidance.
821 *
822 * @returns Pointer to an exit record if special action should be taken using
823 * EMHistoryExec(). Take normal exit action when NULL.
824 *
825 * @param pVCpu The cross context virtual CPU structure.
826 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
827 * @thread EMT(pVCpu)
828 */
829VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndType(PVMCPUCC pVCpu, uint32_t uFlagsAndType)
830{
831 VMCPU_ASSERT_EMT(pVCpu);
832
833 /*
834 * Do the updating.
835 */
836 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
837 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
838 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
839 pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));
840
841 /*
842 * If common exit type, we will insert/update the exit into the exit record hash table.
843 */
844 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
845#ifdef IN_RING0
846 && pVCpu->em.s.fExitOptimizationEnabledR0
847 && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
848#else
849 && pVCpu->em.s.fExitOptimizationEnabled
850#endif
851 && pHistEntry->uFlatPC != UINT64_MAX
852 )
853 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
854 return NULL;
855}
856
857
858/**
859 * Interface for convering a engine specific exit to a generic one and get
860 * guidance, supplying flattened PC too.
861 *
862 * @returns Pointer to an exit record if special action should be taken using
863 * EMHistoryExec(). Take normal exit action when NULL.
864 *
865 * @param pVCpu The cross context virtual CPU structure.
866 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
867 * @param uFlatPC The flattened program counter (RIP).
868 * @thread EMT(pVCpu)
869 */
870VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndTypeAndPC(PVMCPUCC pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC)
871{
872 VMCPU_ASSERT_EMT(pVCpu);
873 Assert(uFlatPC != UINT64_MAX);
874
875 /*
876 * Do the updating.
877 */
878 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
879 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
880 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
881 pHistEntry->uFlagsAndType = uFlagsAndType;
882 pHistEntry->uFlatPC = uFlatPC;
883
884 /*
885 * If common exit type, we will insert/update the exit into the exit record hash table.
886 */
887 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
888#ifdef IN_RING0
889 && pVCpu->em.s.fExitOptimizationEnabledR0
890 && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
891#else
892 && pVCpu->em.s.fExitOptimizationEnabled
893#endif
894 )
895 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
896 return NULL;
897}
898
899
900/**
901 * @callback_method_impl{FNDISREADBYTES}
902 */
903static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
904{
905 PVMCPUCC pVCpu = (PVMCPUCC)pDis->pvUser;
906 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
907
908 /*
909 * Figure how much we can or must read.
910 */
911 size_t cbToRead = GUEST_PAGE_SIZE - (uSrcAddr & (GUEST_PAGE_SIZE - 1));
912 if (cbToRead > cbMaxRead)
913 cbToRead = cbMaxRead;
914 else if (cbToRead < cbMinRead)
915 cbToRead = cbMinRead;
916
917 int rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
918 if (RT_FAILURE(rc))
919 {
920 if (cbToRead > cbMinRead)
921 {
922 cbToRead = cbMinRead;
923 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
924 }
925 if (RT_FAILURE(rc))
926 {
927 /*
928 * If we fail to find the page via the guest's page tables
929 * we invalidate the page in the host TLB (pertaining to
930 * the guest in the NestedPaging case). See @bugref{6043}.
931 */
932 if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
933 {
934 HMInvalidatePage(pVCpu, uSrcAddr);
935 if (((uSrcAddr + cbToRead - 1) >> GUEST_PAGE_SHIFT) != (uSrcAddr >> GUEST_PAGE_SHIFT))
936 HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
937 }
938 }
939 }
940
941 pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
942 return rc;
943}
944
945
946/**
947 * Disassembles the current instruction.
948 *
949 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
950 * details.
951 *
952 * @param pVM The cross context VM structure.
953 * @param pVCpu The cross context virtual CPU structure.
954 * @param pDis Where to return the parsed instruction info.
955 * @param pcbInstr Where to return the instruction size. (optional)
956 */
957VMM_INT_DECL(int) EMInterpretDisasCurrent(PVMCC pVM, PVMCPUCC pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
958{
959 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
960 RTGCPTR GCPtrInstr;
961#if 0
962 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
963#else
964/** @todo Get the CPU mode as well while we're at it! */
965 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
966 pCtxCore->rip, &GCPtrInstr);
967#endif
968 if (RT_FAILURE(rc))
969 {
970 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
971 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
972 return rc;
973 }
974 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
975}
976
977
978/**
979 * Disassembles one instruction.
980 *
981 * This is used by internally by the interpreter and by trap/access handlers.
982 *
983 * @returns VBox status code.
984 *
985 * @param pVM The cross context VM structure.
986 * @param pVCpu The cross context virtual CPU structure.
987 * @param GCPtrInstr The flat address of the instruction.
988 * @param pCtxCore The context core (used to determine the cpu mode).
989 * @param pDis Where to return the parsed instruction info.
990 * @param pcbInstr Where to return the instruction size. (optional)
991 */
992VMM_INT_DECL(int) EMInterpretDisasOneEx(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
993 PDISCPUSTATE pDis, unsigned *pcbInstr)
994{
995 NOREF(pVM);
996 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
997 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
998 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
999 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
1000 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
1001 if (RT_SUCCESS(rc))
1002 return VINF_SUCCESS;
1003 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISCoreOne failed to GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
1004 return rc;
1005}
1006
1007
1008/**
1009 * Interprets the current instruction.
1010 *
1011 * @returns VBox status code.
1012 * @retval VINF_* Scheduling instructions.
1013 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1014 * @retval VERR_* Fatal errors.
1015 *
1016 * @param pVCpu The cross context virtual CPU structure.
1017 * @param pRegFrame The register frame.
1018 * Updates the EIP if an instruction was executed successfully.
1019 * @param pvFault The fault address (CR2).
1020 *
1021 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1022 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1023 * to worry about e.g. invalid modrm combinations (!)
1024 */
1025VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1026{
1027 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1028 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1029 NOREF(pvFault);
1030
1031 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1032 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1033 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1034 rc = VERR_EM_INTERPRETER;
1035 if (rc != VINF_SUCCESS)
1036 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1037
1038 return rc;
1039}
1040
1041
1042/**
1043 * Interprets the current instruction.
1044 *
1045 * @returns VBox status code.
1046 * @retval VINF_* Scheduling instructions.
1047 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1048 * @retval VERR_* Fatal errors.
1049 *
1050 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1051 * @param pRegFrame The register frame.
1052 * Updates the EIP if an instruction was executed successfully.
1053 * @param pvFault The fault address (CR2).
1054 * @param pcbWritten Size of the write (if applicable).
1055 *
1056 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1057 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1058 * to worry about e.g. invalid modrm combinations (!)
1059 */
1060VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
1061{
1062 LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1063 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1064 NOREF(pvFault);
1065
1066 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
1067 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1068 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1069 rc = VERR_EM_INTERPRETER;
1070 if (rc != VINF_SUCCESS)
1071 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1072
1073 return rc;
1074}
1075
1076
1077/**
1078 * Interprets the current instruction using the supplied DISCPUSTATE structure.
1079 *
1080 * IP/EIP/RIP *IS* updated!
1081 *
1082 * @returns VBox strict status code.
1083 * @retval VINF_* Scheduling instructions. When these are returned, it
1084 * starts to get a bit tricky to know whether code was
1085 * executed or not... We'll address this when it becomes a problem.
1086 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1087 * @retval VERR_* Fatal errors.
1088 *
1089 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1090 * @param pDis The disassembler cpu state for the instruction to be
1091 * interpreted.
1092 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
1093 * @param pvFault The fault address (CR2).
1094 * @param enmCodeType Code type (user/supervisor)
1095 *
1096 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1097 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1098 * to worry about e.g. invalid modrm combinations (!)
1099 *
1100 * @todo At this time we do NOT check if the instruction overwrites vital information.
1101 * Make sure this can't happen!! (will add some assertions/checks later)
1102 */
1103VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPUCC pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
1104 RTGCPTR pvFault, EMCODETYPE enmCodeType)
1105{
1106 LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1107 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1108 NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);
1109
1110 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1111 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1112 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1113 rc = VERR_EM_INTERPRETER;
1114
1115 if (rc != VINF_SUCCESS)
1116 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1117
1118 return rc;
1119}
1120
1121
1122
1123
1124/*
1125 *
1126 * Old interpreter primitives used by HM, move/eliminate later.
1127 * Old interpreter primitives used by HM, move/eliminate later.
1128 * Old interpreter primitives used by HM, move/eliminate later.
1129 * Old interpreter primitives used by HM, move/eliminate later.
1130 * Old interpreter primitives used by HM, move/eliminate later.
1131 *
1132 */
1133
1134
1135/**
1136 * Interpret RDPMC.
1137 *
1138 * @returns VBox status code.
1139 * @param pVM The cross context VM structure.
1140 * @param pVCpu The cross context virtual CPU structure.
1141 * @param pRegFrame The register frame.
1142 *
1143 */
1144VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1145{
1146 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1147 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1148
1149 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1150 if ( !(uCR4 & X86_CR4_PCE)
1151 && CPUMGetGuestCPL(pVCpu) != 0)
1152 {
1153 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1154 return VERR_EM_INTERPRETER; /* genuine #GP */
1155 }
1156
1157 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1158 pRegFrame->rax = 0;
1159 pRegFrame->rdx = 0;
1160 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1161 * ecx but see @bugref{3472}! */
1162
1163 NOREF(pVM);
1164 return VINF_SUCCESS;
1165}
1166
1167
1168/* VT-x only: */
1169
1170/**
1171 * Interpret DRx write.
1172 *
1173 * @returns VBox status code.
1174 * @param pVM The cross context VM structure.
1175 * @param pVCpu The cross context virtual CPU structure.
1176 * @param pRegFrame The register frame.
1177 * @param DestRegDrx DRx register index (USE_REG_DR*)
1178 * @param SrcRegGen General purpose register index (USE_REG_E**))
1179 *
1180 */
1181VMM_INT_DECL(int) EMInterpretDRxWrite(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
1182{
1183 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1184 uint64_t uNewDrX;
1185 int rc;
1186 NOREF(pVM);
1187
1188 if (CPUMIsGuestIn64BitCode(pVCpu))
1189 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
1190 else
1191 {
1192 uint32_t val32;
1193 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
1194 uNewDrX = val32;
1195 }
1196
1197 if (RT_SUCCESS(rc))
1198 {
1199 if (DestRegDrx == 6)
1200 {
1201 uNewDrX |= X86_DR6_RA1_MASK;
1202 uNewDrX &= ~X86_DR6_RAZ_MASK;
1203 }
1204 else if (DestRegDrx == 7)
1205 {
1206 uNewDrX |= X86_DR7_RA1_MASK;
1207 uNewDrX &= ~X86_DR7_RAZ_MASK;
1208 }
1209
1210 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
1211 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
1212 if (RT_SUCCESS(rc))
1213 return rc;
1214 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
1215 }
1216 return VERR_EM_INTERPRETER;
1217}
1218
1219
1220/**
1221 * Interpret DRx read.
1222 *
1223 * @returns VBox status code.
1224 * @param pVM The cross context VM structure.
1225 * @param pVCpu The cross context virtual CPU structure.
1226 * @param pRegFrame The register frame.
1227 * @param DestRegGen General purpose register index (USE_REG_E**))
1228 * @param SrcRegDrx DRx register index (USE_REG_DR*)
1229 */
1230VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
1231{
1232 uint64_t val64;
1233 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1234 NOREF(pVM);
1235
1236 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
1237 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
1238 if (CPUMIsGuestIn64BitCode(pVCpu))
1239 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
1240 else
1241 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
1242
1243 if (RT_SUCCESS(rc))
1244 return VINF_SUCCESS;
1245
1246 return VERR_EM_INTERPRETER;
1247}
1248
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use