VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/EMAll.cpp@ 74795

Last change on this file since 74795 was 74789, checked in by vboxsync, 6 years ago

vm.h,VMM,REM: s/VMCPU_FF_IS_PENDING/VMCPU_FF_IS_ANY_SET/g to emphasize the plurality of the flags argument and encourage using VMCPU_FF_IS_SET. bugref:9180

1/* $Id: EMAll.cpp 74789 2018-10-12 10:34:32Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor(/Manager) - All contexts
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_EM
23#include <VBox/vmm/em.h>
24#include <VBox/vmm/mm.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/patm.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/iem.h>
29#include <VBox/vmm/iom.h>
30#include <VBox/vmm/hm.h>
31#include <VBox/vmm/pdmapi.h>
32#include <VBox/vmm/vmm.h>
33#include <VBox/vmm/stam.h>
34#include "EMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/param.h>
37#include <VBox/err.h>
38#include <VBox/dis.h>
39#include <VBox/disopcode.h>
40#include <VBox/log.h>
41#include <iprt/assert.h>
42#include <iprt/string.h>
43
44
45
46
47/**
48 * Gets the current execution manager status.
49 *
50 * @returns Current status.
51 * @param pVCpu The cross context virtual CPU structure.
52 */
53VMM_INT_DECL(EMSTATE) EMGetState(PVMCPU pVCpu)
54{
55 return pVCpu->em.s.enmState;
56}
57
58
59/**
60 * Sets the current execution manager status. (use only when you know what you're doing!)
61 *
62 * @param pVCpu The cross context virtual CPU structure.
63 * @param enmNewState The new state, EMSTATE_WAIT_SIPI or EMSTATE_HALTED.
64 */
65VMM_INT_DECL(void) EMSetState(PVMCPU pVCpu, EMSTATE enmNewState)
66{
67 /* Only allowed combination: */
68 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
69 pVCpu->em.s.enmState = enmNewState;
70}
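/** Usage sketch (illustrative, not from the original source). The assertion
 * above only permits the WAIT_SIPI -> HALTED transition:
 * @code
 *     if (EMGetState(pVCpu) == EMSTATE_WAIT_SIPI)
 *         EMSetState(pVCpu, EMSTATE_HALTED);
 * @endcode
 */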
71
72
73/**
74 * Sets the PC for which interrupts should be inhibited.
75 *
76 * @param pVCpu The cross context virtual CPU structure.
77 * @param PC The PC.
78 */
79VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
80{
81 pVCpu->em.s.GCPtrInhibitInterrupts = PC;
82 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
83}
84
85
86/**
87 * Gets the PC for which interrupts should be inhibited.
88 *
89 * There are a few instructions which inhibit or delay interrupts
90 * for the instruction following them. These instructions are:
91 * - STI
92 * - MOV SS, r/m16
93 * - POP SS
94 *
95 * @returns The PC for which interrupts should be inhibited.
96 * @param pVCpu The cross context virtual CPU structure.
97 *
98 */
99VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
100{
101 return pVCpu->em.s.GCPtrInhibitInterrupts;
102}
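/** Sketch of how an execution engine would typically use the pair above (a
 * hedged example; the surrounding interpreter loop and pCtx are assumed):
 * @code
 *     // After interpreting STI / MOV SS / POP SS:
 *     EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
 *
 *     // Before injecting an interrupt:
 *     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
 *         && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
 *         return; // still in the interrupt shadow, retry after the next instruction
 * @endcode
 */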
103
104
105/**
106 * Enables / disables hypercall instructions.
107 *
108 * This interface is used by GIM to tell the execution monitors whether the
109 * hypercall instructions (VMMCALL & VMCALL) are allowed or should \#UD.
110 *
111 * @param pVCpu The cross context virtual CPU structure this applies to.
112 * @param fEnabled Whether hypercall instructions are enabled (true) or not.
113 */
114VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled)
115{
116 pVCpu->em.s.fHypercallEnabled = fEnabled;
117}
118
119
120/**
121 * Checks if hypercall instructions (VMMCALL & VMCALL) are enabled or not.
122 *
123 * @returns true if enabled, false if not.
124 * @param pVCpu The cross context virtual CPU structure.
125 *
126 * @note If this call becomes a performance factor, we can make the data
127 * field available thru a read-only view in VMCPU. See VM::cpum.ro.
128 */
129VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu)
130{
131 return pVCpu->em.s.fHypercallEnabled;
132}
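/** Illustrative check at a VMCALL/VMMCALL intercept (a hypothetical call
 * site; how \#UD actually gets raised depends on the execution engine):
 * @code
 *     if (!EMAreHypercallInstructionsEnabled(pVCpu))
 *         return VERR_EM_INTERPRETER; // have the caller raise #UD instead
 * @endcode
 */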
133
134
135/**
136 * Prepare an MWAIT - essentials of the MONITOR instruction.
137 *
138 * @returns VINF_SUCCESS
139 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
140 * @param rax The content of RAX.
141 * @param rcx The content of RCX.
142 * @param rdx The content of RDX.
143 * @param GCPhys The physical address corresponding to rax.
144 */
145VMM_INT_DECL(int) EMMonitorWaitPrepare(PVMCPU pVCpu, uint64_t rax, uint64_t rcx, uint64_t rdx, RTGCPHYS GCPhys)
146{
147 pVCpu->em.s.MWait.uMonitorRAX = rax;
148 pVCpu->em.s.MWait.uMonitorRCX = rcx;
149 pVCpu->em.s.MWait.uMonitorRDX = rdx;
150 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_MONITOR_ACTIVE;
151 /** @todo Make use of GCPhys. */
152 NOREF(GCPhys);
153 /** @todo Complete MONITOR implementation. */
154 return VINF_SUCCESS;
155}
156
157
158/**
159 * Checks if the monitor hardware is armed / active.
160 *
161 * @returns true if armed, false otherwise.
162 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
163 */
164VMM_INT_DECL(bool) EMMonitorIsArmed(PVMCPU pVCpu)
165{
166 return RT_BOOL(pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_MONITOR_ACTIVE);
167}
168
169
170/**
171 * Performs an MWAIT.
172 *
173 * @returns VINF_SUCCESS
174 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
175 * @param rax The content of RAX.
176 * @param rcx The content of RCX.
177 */
178VMM_INT_DECL(int) EMMonitorWaitPerform(PVMCPU pVCpu, uint64_t rax, uint64_t rcx)
179{
180 pVCpu->em.s.MWait.uMWaitRAX = rax;
181 pVCpu->em.s.MWait.uMWaitRCX = rcx;
182 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_ACTIVE;
183 if (rcx)
184 pVCpu->em.s.MWait.fWait |= EMMWAIT_FLAG_BREAKIRQIF0;
185 else
186 pVCpu->em.s.MWait.fWait &= ~EMMWAIT_FLAG_BREAKIRQIF0;
187 /** @todo not completely correct?? */
188 return VINF_EM_HALT;
189}
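/** How the two calls above fit together in an interpreter (a hedged sketch;
 * fetching the guest registers is assumed, compare EMInterpretMonitor and
 * EMInterpretMWait near the end of this file):
 * @code
 *     // MONITOR: arm the address-range watch.
 *     EMMonitorWaitPrepare(pVCpu, pCtx->rax, pCtx->rcx, pCtx->rdx,
 *                          NIL_RTGCPHYS); // GCPhys is not used yet, see the todo above
 *     // MWAIT: record the hints; the VINF_EM_HALT return parks the EMT.
 *     int rc = EMMonitorWaitPerform(pVCpu, pCtx->rax, pCtx->rcx);
 * @endcode
 */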
190
191
192/**
193 * Clears any address-range monitoring that is active.
194 *
195 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
196 */
197VMM_INT_DECL(void) EMMonitorWaitClear(PVMCPU pVCpu)
198{
199 LogFlowFunc(("Clearing MWAIT\n"));
200 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
201}
202
203
204/**
205 * Determine if we should continue execution in HM after encountering an mwait
206 * instruction.
207 *
208 * Clears MWAIT flags if returning @c true.
209 *
210 * @returns true if we should continue, false if we should halt.
211 * @param pVCpu The cross context virtual CPU structure.
212 * @param pCtx Current CPU context.
213 */
214VMM_INT_DECL(bool) EMMonitorWaitShouldContinue(PVMCPU pVCpu, PCPUMCTX pCtx)
215{
216 if ( pCtx->eflags.Bits.u1IF
217 || ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
218 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0)) )
219 {
220 if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
221 {
222 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
223 return true;
224 }
225 }
226
227 return false;
228}
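/** Typical use after an MWAIT intercept (illustrative):
 * @code
 *     if (EMMonitorWaitShouldContinue(pVCpu, pCtx))
 *         return VINF_SUCCESS; // a wakeup condition is pending, keep executing
 *     return VINF_EM_HALT;     // really halt until an interrupt arrives
 * @endcode
 */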
229
230
231/**
232 * Determine if we should continue execution in HM after encountering a hlt
233 * instruction.
234 *
235 * @returns true if we should continue, false if we should halt.
236 * @param pVCpu The cross context virtual CPU structure.
237 * @param pCtx Current CPU context.
238 */
239VMM_INT_DECL(bool) EMShouldContinueAfterHalt(PVMCPU pVCpu, PCPUMCTX pCtx)
240{
241 /** @todo Shouldn't we be checking GIF here? */
242 if (pCtx->eflags.Bits.u1IF)
243 return VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC));
244 return false;
245}
246
247
248/**
249 * Unhalts and wakes up the given CPU.
250 *
251 * This is an API for assisting the KVM hypercall API in implementing KICK_CPU.
252 * It sets VMCPU_FF_UNHALT for @a pVCpuDst and makes sure it is woken up. If
253 * the CPU isn't currently in a halt, the next HLT instruction it executes will
254 * be affected.
255 *
256 * @returns GVMMR0SchedWakeUpEx result or VINF_SUCCESS depending on context.
257 * @param pVM The cross context VM structure.
258 * @param pVCpuDst The cross context virtual CPU structure of the
259 * CPU to unhalt and wake up. This is usually not the
260 * same as the caller.
261 * @thread EMT
262 */
263VMM_INT_DECL(int) EMUnhaltAndWakeUp(PVM pVM, PVMCPU pVCpuDst)
264{
265 /*
266 * Flag the current(/next) HLT to unhalt immediately.
267 */
268 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_UNHALT);
269
270 /*
271 * Wake up the EMT (technically should be abstracted by VMM/VMEmt, but
272 * just do it here for now).
273 */
274#ifdef IN_RING0
275 /* We might be here with preemption disabled or enabled (i.e. depending on
276 thread-context hooks being used), so don't try obtaining the GVMMR0 used
277 lock here. See @bugref{7270#c148}. */
278 int rc = GVMMR0SchedWakeUpNoGVMNoLock(pVM, pVCpuDst->idCpu);
279 AssertRC(rc);
280
281#elif defined(IN_RING3)
282 int rc = SUPR3CallVMMR0(pVM->pVMR0, pVCpuDst->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL /* pvArg */);
283 AssertRC(rc);
284
285#else
286 /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
287 Assert(pVM->cCpus == 1); NOREF(pVM);
288 int rc = VINF_SUCCESS;
289#endif
290 return rc;
291}
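/** Sketch of the intended caller, a KVM KICK_CPU hypercall handler
 * (hypothetical; uHyperArg is an assumed name for the hypercall argument):
 * @code
 *     VMCPUID idCpuDst = (VMCPUID)uHyperArg;
 *     if (idCpuDst < pVM->cCpus)
 *         rc = EMUnhaltAndWakeUp(pVM, &pVM->aCpus[idCpuDst]);
 * @endcode
 */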
292
293#ifndef IN_RING3
294
295/**
296 * Makes an I/O port write pending for ring-3 processing.
297 *
298 * @returns VINF_EM_PENDING_R3_IOPORT_WRITE
299 * @param pVCpu The cross context virtual CPU structure.
300 * @param uPort The I/O port.
301 * @param cbInstr The instruction length (for RIP updating).
302 * @param cbValue The write size.
303 * @param uValue The value being written.
304 * @sa emR3ExecutePendingIoPortWrite
305 *
306 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
307 */
308VMMRZ_INT_DECL(VBOXSTRICTRC)
309EMRZSetPendingIoPortWrite(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue, uint32_t uValue)
310{
311 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
312 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
313 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
314 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
315 pVCpu->em.s.PendingIoPortAccess.uValue = uValue;
316 return VINF_EM_PENDING_R3_IOPORT_WRITE;
317}
318
319
320/**
321 * Makes an I/O port read pending for ring-3 processing.
322 *
323 * @returns VINF_EM_PENDING_R3_IOPORT_READ
324 * @param pVCpu The cross context virtual CPU structure.
325 * @param uPort The I/O port.
326 * @param cbInstr The instruction length (for RIP updating).
327 * @param cbValue The read size.
328 * @sa emR3ExecutePendingIoPortRead
329 *
330 * @note Must not be used when I/O port breakpoints are pending or when single stepping.
331 */
332VMMRZ_INT_DECL(VBOXSTRICTRC)
333EMRZSetPendingIoPortRead(PVMCPU pVCpu, RTIOPORT uPort, uint8_t cbInstr, uint8_t cbValue)
334{
335 Assert(pVCpu->em.s.PendingIoPortAccess.cbValue == 0);
336 pVCpu->em.s.PendingIoPortAccess.uPort = uPort;
337 pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;
338 pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;
339 pVCpu->em.s.PendingIoPortAccess.uValue = UINT32_C(0x52454144); /* 'READ' */
340 return VINF_EM_PENDING_R3_IOPORT_READ;
341}
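/** A hedged example of deferring an OUT to ring-3 (fNeedRing3 stands in for
 * whatever condition the caller actually checks, e.g. a VINF_IOM_R3_IOPORT_WRITE
 * status from IOM):
 * @code
 *     if (fNeedRing3)
 *         return EMRZSetPendingIoPortWrite(pVCpu, uPort, cbInstr, cbValue, uValue);
 * @endcode
 * Ring-3 then completes the access via emR3ExecutePendingIoPortWrite and
 * advances RIP by cbInstr.
 */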
342
343#endif /* !IN_RING3 */
344
345
346/**
347 * Worker for EMHistoryExec that checks for ring-3 returns and flags
348 * continuation of the EMHistoryExec run there.
349 */
350DECL_FORCE_INLINE(void) emHistoryExecSetContinueExitRecIdx(PVMCPU pVCpu, VBOXSTRICTRC rcStrict, PCEMEXITREC pExitRec)
351{
352 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
353#ifdef IN_RING3
354 RT_NOREF_PV(rcStrict); RT_NOREF_PV(pExitRec);
355#else
356 switch (VBOXSTRICTRC_VAL(rcStrict))
357 {
358 case VINF_SUCCESS:
359 default:
360 break;
361
362 /*
363 * Only status codes that EMHandleRCTmpl.h will resume EMHistoryExec with.
364 */
365 case VINF_IOM_R3_IOPORT_READ: /* -> emR3ExecuteIOInstruction */
366 case VINF_IOM_R3_IOPORT_WRITE: /* -> emR3ExecuteIOInstruction */
367 case VINF_IOM_R3_IOPORT_COMMIT_WRITE: /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
368 case VINF_IOM_R3_MMIO_READ: /* -> emR3ExecuteInstruction */
369 case VINF_IOM_R3_MMIO_WRITE: /* -> emR3ExecuteInstruction */
370 case VINF_IOM_R3_MMIO_READ_WRITE: /* -> emR3ExecuteInstruction */
371 case VINF_IOM_R3_MMIO_COMMIT_WRITE: /* -> VMCPU_FF_IOM -> VINF_EM_RESUME_R3_HISTORY_EXEC -> emR3ExecuteIOInstruction */
372 case VINF_CPUM_R3_MSR_READ: /* -> emR3ExecuteInstruction */
373 case VINF_CPUM_R3_MSR_WRITE: /* -> emR3ExecuteInstruction */
374 case VINF_GIM_R3_HYPERCALL: /* -> emR3ExecuteInstruction */
375 pVCpu->em.s.idxContinueExitRec = (uint16_t)(pExitRec - &pVCpu->em.s.aExitRecords[0]);
376 break;
377 }
378#endif /* !IN_RING3 */
379}
380
381#ifndef IN_RC
382
383/**
384 * Execute using history.
385 *
386 * This function will be called when EMHistoryAddExit() and friends return a
387 * non-NULL result. This happens in response to probing or when probing has
388 * uncovered adjacent exits which can be reached more effectively by using IEM
389 * than by restarting execution with the main execution engine and fielding a
390 * regular exit.
391 *
392 * @returns VBox strict status code, see IEMExecForExits.
393 * @param pVCpu The cross context virtual CPU structure.
394 * @param pExitRec The exit record returned by a previous history add
395 * or update call.
396 * @param fWillExit Flags indicating to IEM what will cause exits, TBD.
397 */
398VMM_INT_DECL(VBOXSTRICTRC) EMHistoryExec(PVMCPU pVCpu, PCEMEXITREC pExitRec, uint32_t fWillExit)
399{
400 Assert(pExitRec);
401 VMCPU_ASSERT_EMT(pVCpu);
402 IEMEXECFOREXITSTATS ExecStats;
403 switch (pExitRec->enmAction)
404 {
405 /*
406 * Executes multiple instructions, stopping only when we've gone a given
407 * number without perceived exits.
408 */
409 case EMEXITACTION_EXEC_WITH_MAX:
410 {
411 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryExec, a);
412 LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %RX64, max %u\n", pExitRec->uFlatPC, pExitRec->cMaxInstructionsWithoutExit));
413 VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
414 pExitRec->cMaxInstructionsWithoutExit /* cMinInstructions*/,
415 pVCpu->em.s.cHistoryExecMaxInstructions,
416 pExitRec->cMaxInstructionsWithoutExit,
417 &ExecStats);
418 LogFlow(("EMHistoryExec/EXEC_WITH_MAX: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
419 VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
420 emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRec);
421
422 /* Ignore instructions IEM doesn't know about. */
423 if ( ( rcStrict != VERR_IEM_INSTR_NOT_IMPLEMENTED
424 && rcStrict != VERR_IEM_ASPECT_NOT_IMPLEMENTED)
425 || ExecStats.cInstructions == 0)
426 { /* likely */ }
427 else
428 rcStrict = VINF_SUCCESS;
429
430 if (ExecStats.cExits > 1)
431 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecSavedExits, ExecStats.cExits - 1);
432 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryExecInstructions, ExecStats.cInstructions);
433 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryExec, a);
434 return rcStrict;
435 }
436
437 /*
438 * Probe an exit for close-by exits.
439 */
440 case EMEXITACTION_EXEC_PROBE:
441 {
442 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHistoryProbe, b);
443 LogFlow(("EMHistoryExec/EXEC_PROBE: %RX64\n", pExitRec->uFlatPC));
444 PEMEXITREC pExitRecUnconst = (PEMEXITREC)pExitRec;
445 VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, fWillExit,
446 pVCpu->em.s.cHistoryProbeMinInstructions,
447 pVCpu->em.s.cHistoryExecMaxInstructions,
448 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit,
449 &ExecStats);
450 LogFlow(("EMHistoryExec/EXEC_PROBE: %Rrc cExits=%u cMaxExitDistance=%u cInstructions=%u\n",
451 VBOXSTRICTRC_VAL(rcStrict), ExecStats.cExits, ExecStats.cMaxExitDistance, ExecStats.cInstructions));
452 emHistoryExecSetContinueExitRecIdx(pVCpu, rcStrict, pExitRecUnconst);
453 if ( ExecStats.cExits >= 2
454 && RT_SUCCESS(rcStrict))
455 {
456 Assert(ExecStats.cMaxExitDistance > 0 && ExecStats.cMaxExitDistance <= 32);
457 pExitRecUnconst->cMaxInstructionsWithoutExit = ExecStats.cMaxExitDistance;
458 pExitRecUnconst->enmAction = EMEXITACTION_EXEC_WITH_MAX;
459 LogFlow(("EMHistoryExec/EXEC_PROBE: -> EXEC_WITH_MAX %u\n", ExecStats.cMaxExitDistance));
460 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedExecWithMax);
461 }
462#ifndef IN_RING3
463 else if ( pVCpu->em.s.idxContinueExitRec != UINT16_MAX
464 && RT_SUCCESS(rcStrict))
465 {
466 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedToRing3);
467 LogFlow(("EMHistoryExec/EXEC_PROBE: -> ring-3\n"));
468 }
469#endif
470 else
471 {
472 pExitRecUnconst->enmAction = EMEXITACTION_NORMAL_PROBED;
473 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
474 LogFlow(("EMHistoryExec/EXEC_PROBE: -> PROBED\n"));
475 STAM_REL_COUNTER_INC(&pVCpu->em.s.StatHistoryProbedNormal);
476 if ( rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED
477 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
478 rcStrict = VINF_SUCCESS;
479 }
480 STAM_REL_COUNTER_ADD(&pVCpu->em.s.StatHistoryProbeInstructions, ExecStats.cInstructions);
481 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHistoryProbe, b);
482 return rcStrict;
483 }
484
485 /* We shouldn't ever see these here! */
486 case EMEXITACTION_FREE_RECORD:
487 case EMEXITACTION_NORMAL:
488 case EMEXITACTION_NORMAL_PROBED:
489 break;
490
491 /* No default case, want compiler warnings. */
492 }
493 AssertLogRelFailedReturn(VERR_EM_INTERNAL_ERROR);
494}
495
496
497/**
498 * Worker for emHistoryAddOrUpdateRecord.
499 */
500DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInit(PEMEXITREC pExitRec, uint64_t uFlatPC, uint32_t uFlagsAndType, uint64_t uExitNo)
501{
502 pExitRec->uFlatPC = uFlatPC;
503 pExitRec->uFlagsAndType = uFlagsAndType;
504 pExitRec->enmAction = EMEXITACTION_NORMAL;
505 pExitRec->bUnused = 0;
506 pExitRec->cMaxInstructionsWithoutExit = 64;
507 pExitRec->uLastExitNo = uExitNo;
508 pExitRec->cHits = 1;
509 return NULL;
510}
511
512
513/**
514 * Worker for emHistoryAddOrUpdateRecord.
515 */
516DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitNew(PVMCPU pVCpu, PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
517 PEMEXITREC pExitRec, uint64_t uFlatPC,
518 uint32_t uFlagsAndType, uint64_t uExitNo)
519{
520 pHistEntry->idxSlot = (uint32_t)idxSlot;
521 pVCpu->em.s.cExitRecordUsed++;
522 LogFlow(("emHistoryRecordInitNew: [%#x] = %#07x %016RX64; (%u of %u used)\n", idxSlot, uFlagsAndType, uFlatPC,
523 pVCpu->em.s.cExitRecordUsed, RT_ELEMENTS(pVCpu->em.s.aExitRecords) ));
524 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
525}
526
527
528/**
529 * Worker for emHistoryAddOrUpdateRecord.
530 */
531DECL_FORCE_INLINE(PCEMEXITREC) emHistoryRecordInitReplacement(PEMEXITENTRY pHistEntry, uintptr_t idxSlot,
532 PEMEXITREC pExitRec, uint64_t uFlatPC,
533 uint32_t uFlagsAndType, uint64_t uExitNo)
534{
535 pHistEntry->idxSlot = (uint32_t)idxSlot;
536 LogFlow(("emHistoryRecordInitReplacement: [%#x] = %#07x %016RX64 replacing %#07x %016RX64 with %u hits, %u exits old\n",
537 idxSlot, uFlagsAndType, uFlatPC, pExitRec->uFlagsAndType, pExitRec->uFlatPC, pExitRec->cHits,
538 uExitNo - pExitRec->uLastExitNo));
539 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
540}
541
542
543/**
544 * Adds or updates the EMEXITREC for this PC/type and decides on an action.
545 *
546 * @returns Pointer to an exit record if special action should be taken using
547 * EMHistoryExec(). Take normal exit action when NULL.
548 *
549 * @param pVCpu The cross context virtual CPU structure.
550 * @param uFlagsAndType Combined flags and type, EMEXIT_F_KIND_EM set and
551 * both EMEXIT_F_CS_EIP and EMEXIT_F_UNFLATTENED_PC are clear.
552 * @param uFlatPC The flattened program counter.
553 * @param pHistEntry The exit history entry.
554 * @param uExitNo The current exit number.
555 */
556static PCEMEXITREC emHistoryAddOrUpdateRecord(PVMCPU pVCpu, uint64_t uFlagsAndType, uint64_t uFlatPC,
557 PEMEXITENTRY pHistEntry, uint64_t uExitNo)
558{
559# ifdef IN_RING0
560 /* Disregard the hm flag. */
561 uFlagsAndType &= ~EMEXIT_F_HM;
562# endif
563
564 /*
565 * Work the hash table.
566 */
567 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitRecords) == 1024);
568# define EM_EXIT_RECORDS_IDX_MASK 0x3ff
569 uintptr_t idxSlot = ((uintptr_t)uFlatPC >> 1) & EM_EXIT_RECORDS_IDX_MASK;
570 PEMEXITREC pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
571 if (pExitRec->uFlatPC == uFlatPC)
572 {
573 Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
574 pHistEntry->idxSlot = (uint32_t)idxSlot;
575 if (pExitRec->uFlagsAndType == uFlagsAndType)
576 {
577 pExitRec->uLastExitNo = uExitNo;
578 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[0]);
579 }
580 else
581 {
582 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[0]);
583 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
584 }
585 }
586 else if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
587 {
588 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[0]);
589 return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
590 }
591 else
592 {
593 /*
594 * Collision. We calculate a new hash for stepping away from the first,
595 * doing up to 8 steps away before replacing the least recently used record.
596 */
597 uintptr_t idxOldest = idxSlot;
598 uint64_t uOldestExitNo = pExitRec->uLastExitNo;
599 unsigned iOldestStep = 0;
600 unsigned iStep = 1;
601 uintptr_t const idxAdd = (uintptr_t)(uFlatPC >> 11) & (EM_EXIT_RECORDS_IDX_MASK / 4);
602 for (;;)
603 {
604 Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
605 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecNew) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
606 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
607 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecTypeChanged) == RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits));
608
609 /* Step to the next slot. */
610 idxSlot += idxAdd;
611 idxSlot &= EM_EXIT_RECORDS_IDX_MASK;
612 pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
613
614 /* Does it match? */
615 if (pExitRec->uFlatPC == uFlatPC)
616 {
617 Assert(pExitRec->enmAction != EMEXITACTION_FREE_RECORD);
618 pHistEntry->idxSlot = (uint32_t)idxSlot;
619 if (pExitRec->uFlagsAndType == uFlagsAndType)
620 {
621 pExitRec->uLastExitNo = uExitNo;
622 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecHits[iStep]);
623 break;
624 }
625 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecTypeChanged[iStep]);
626 return emHistoryRecordInit(pExitRec, uFlatPC, uFlagsAndType, uExitNo);
627 }
628
629 /* Is it free? */
630 if (pExitRec->enmAction == EMEXITACTION_FREE_RECORD)
631 {
632 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecNew[iStep]);
633 return emHistoryRecordInitNew(pVCpu, pHistEntry, idxSlot, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
634 }
635
636 /* Is it the least recently used one? */
637 if (pExitRec->uLastExitNo < uOldestExitNo)
638 {
639 uOldestExitNo = pExitRec->uLastExitNo;
640 idxOldest = idxSlot;
641 iOldestStep = iStep;
642 }
643
644 /* Next iteration? */
645 iStep++;
646 Assert(iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecReplaced));
647 if (RT_LIKELY(iStep < 8 + 1))
648 { /* likely */ }
649 else
650 {
651 /* Replace the least recently used slot. */
652 STAM_REL_COUNTER_INC(&pVCpu->em.s.aStatHistoryRecReplaced[iOldestStep]);
653 pExitRec = &pVCpu->em.s.aExitRecords[idxOldest];
654 return emHistoryRecordInitReplacement(pHistEntry, idxOldest, pExitRec, uFlatPC, uFlagsAndType, uExitNo);
655 }
656 }
657 }
658
659 /*
660 * Found an existing record.
661 */
662 switch (pExitRec->enmAction)
663 {
664 case EMEXITACTION_NORMAL:
665 {
666 uint64_t const cHits = ++pExitRec->cHits;
667 if (cHits < 256)
668 return NULL;
669 LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> EXEC_PROBE\n", idxSlot, uFlagsAndType, uFlatPC));
670 pExitRec->enmAction = EMEXITACTION_EXEC_PROBE;
671 return pExitRec;
672 }
673
674 case EMEXITACTION_NORMAL_PROBED:
675 pExitRec->cHits += 1;
676 return NULL;
677
678 default:
679 pExitRec->cHits += 1;
680 return pExitRec;
681
682 /* This will happen if the caller ignores or cannot serve the probe
683 request (forced to ring-3, whatever). We retry this 256 times. */
684 case EMEXITACTION_EXEC_PROBE:
685 {
686 uint64_t const cHits = ++pExitRec->cHits;
687 if (cHits < 512)
688 return pExitRec;
689 pExitRec->enmAction = EMEXITACTION_NORMAL_PROBED;
690 LogFlow(("emHistoryAddOrUpdateRecord: [%#x] %#07x %16RX64: -> PROBED\n", idxSlot, uFlagsAndType, uFlatPC));
691 return NULL;
692 }
693 }
694}
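/** For reference, the slot hashing used by the lookup above, restated in one
 * place (an illustrative summary of the code, not an addition to it):
 * @code
 *     idxSlot = ((uintptr_t)uFlatPC >> 1)  & 0x3ff;       // primary hash, 1024 slots
 *     idxAdd  = ((uintptr_t)uFlatPC >> 11) & (0x3ff / 4); // probe stride on collision
 * @endcode
 */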
695
696#endif /* !IN_RC */
697
698/**
699 * Adds an exit to the history for this CPU.
700 *
701 * @returns Pointer to an exit record if special action should be taken using
702 * EMHistoryExec(). Take normal exit action when NULL.
703 *
704 * @param pVCpu The cross context virtual CPU structure.
705 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
706 * @param uFlatPC The flattened program counter (RIP). UINT64_MAX if not available.
707 * @param uTimestamp The TSC value for the exit, 0 if not available.
708 * @thread EMT(pVCpu)
709 */
710VMM_INT_DECL(PCEMEXITREC) EMHistoryAddExit(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC, uint64_t uTimestamp)
711{
712 VMCPU_ASSERT_EMT(pVCpu);
713
714 /*
715 * Add the exit history entry.
716 */
717 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
718 uint64_t uExitNo = pVCpu->em.s.iNextExit++;
719 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
720 pHistEntry->uFlatPC = uFlatPC;
721 pHistEntry->uTimestamp = uTimestamp;
722 pHistEntry->uFlagsAndType = uFlagsAndType;
723 pHistEntry->idxSlot = UINT32_MAX;
724
725#ifndef IN_RC
726 /*
727 * If common exit type, we will insert/update the exit into the exit record hash table.
728 */
729 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
730# ifdef IN_RING0
731 && pVCpu->em.s.fExitOptimizationEnabledR0
732 && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
733# else
734 && pVCpu->em.s.fExitOptimizationEnabled
735# endif
736 && uFlatPC != UINT64_MAX
737 )
738 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
739#endif
740 return NULL;
741}
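/** Sketch of a typical call site in an execution engine (hypothetical; the
 * exit type constant is only an example):
 * @code
 *     PCEMEXITREC pExitRec = EMHistoryAddExit(pVCpu,
 *                                             EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ),
 *                                             pCtx->cs.u64Base + pCtx->rip, ASMReadTSC());
 *     if (pExitRec)
 *         rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); // fWillExit: none yet (TBD)
 * @endcode
 */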
742
743
744#ifdef IN_RC
745/**
746 * Special raw-mode interface for adding an exit to the history.
747 *
748 * Currently this is only for recording, not optimizing, so no return value. If
749 * we start seriously caring about raw-mode again, we may extend it.
750 *
751 * @param pVCpu The cross context virtual CPU structure.
752 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
753 * @param uCs The CS.
754 * @param uEip The EIP.
755 * @param uTimestamp The TSC value for the exit, 0 if not available.
756 * @thread EMT(0)
757 */
758VMMRC_INT_DECL(void) EMRCHistoryAddExitCsEip(PVMCPU pVCpu, uint32_t uFlagsAndType, uint16_t uCs, uint32_t uEip, uint64_t uTimestamp)
759{
760 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
761 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)(pVCpu->em.s.iNextExit++) & 0xff];
762 pHistEntry->uFlatPC = ((uint64_t)uCs << 32) | uEip;
763 pHistEntry->uTimestamp = uTimestamp;
764 pHistEntry->uFlagsAndType = uFlagsAndType | EMEXIT_F_CS_EIP;
765 pHistEntry->idxSlot = UINT32_MAX;
766}
767#endif
768
769
770#ifdef IN_RING0
771/**
772 * Interface that VT-x uses to supply the PC of an exit when CS:RIP is being read.
773 *
774 * @param pVCpu The cross context virtual CPU structure.
775 * @param uFlatPC The flattened program counter (RIP).
776 * @param fFlattened Set if RIP was subjected to CS.BASE, clear if not.
777 */
778VMMR0_INT_DECL(void) EMR0HistoryUpdatePC(PVMCPU pVCpu, uint64_t uFlatPC, bool fFlattened)
779{
780 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
781 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
782 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
783 pHistEntry->uFlatPC = uFlatPC;
784 if (fFlattened)
785 pHistEntry->uFlagsAndType &= ~EMEXIT_F_UNFLATTENED_PC;
786 else
787 pHistEntry->uFlagsAndType |= EMEXIT_F_UNFLATTENED_PC;
788}
789#endif
790
791
792/**
793 * Interface for converting an engine-specific exit to a generic one and getting guidance.
794 *
795 * @returns Pointer to an exit record if special action should be taken using
796 * EMHistoryExec(). Take normal exit action when NULL.
797 *
798 * @param pVCpu The cross context virtual CPU structure.
799 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
800 * @thread EMT(pVCpu)
801 */
802VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndType(PVMCPU pVCpu, uint32_t uFlagsAndType)
803{
804 VMCPU_ASSERT_EMT(pVCpu);
805
806 /*
807 * Do the updating.
808 */
809 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
810 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
811 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
812 pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));
813
814#ifndef IN_RC
815 /*
816 * If common exit type, we will insert/update the exit into the exit record hash table.
817 */
818 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
819# ifdef IN_RING0
820 && pVCpu->em.s.fExitOptimizationEnabledR0
821 && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
822# else
823 && pVCpu->em.s.fExitOptimizationEnabled
824# endif
825 && pHistEntry->uFlatPC != UINT64_MAX
826 )
827 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
828#endif
829 return NULL;
830}
831
832
833/**
834 * Interface for converting an engine-specific exit to a generic one and
835 * getting guidance, supplying the flattened PC too.
836 *
837 * @returns Pointer to an exit record if special action should be taken using
838 * EMHistoryExec(). Take normal exit action when NULL.
839 *
840 * @param pVCpu The cross context virtual CPU structure.
841 * @param uFlagsAndType Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
842 * @param uFlatPC The flattened program counter (RIP).
843 * @thread EMT(pVCpu)
844 */
845VMM_INT_DECL(PCEMEXITREC) EMHistoryUpdateFlagsAndTypeAndPC(PVMCPU pVCpu, uint32_t uFlagsAndType, uint64_t uFlatPC)
846{
847 VMCPU_ASSERT_EMT(pVCpu);
848 Assert(uFlatPC != UINT64_MAX);
849
850 /*
851 * Do the updating.
852 */
853 AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
854 uint64_t uExitNo = pVCpu->em.s.iNextExit - 1;
855 PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)uExitNo & 0xff];
856 pHistEntry->uFlagsAndType = uFlagsAndType;
857 pHistEntry->uFlatPC = uFlatPC;
858
859#ifndef IN_RC
860 /*
861 * If common exit type, we will insert/update the exit into the exit record hash table.
862 */
863 if ( (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
864# ifdef IN_RING0
865 && pVCpu->em.s.fExitOptimizationEnabledR0
866 && ( !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
867# else
868 && pVCpu->em.s.fExitOptimizationEnabled
869# endif
870 )
871 return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
872#endif
873 return NULL;
874}
875
876
877/**
878 * Locks REM execution to a single VCPU.
879 *
880 * @param pVM The cross context VM structure.
881 */
882VMMDECL(void) EMRemLock(PVM pVM)
883{
884#ifdef VBOX_WITH_REM
885 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
886 return; /* early init */
887
888 Assert(!PGMIsLockOwner(pVM));
889 Assert(!IOMIsLockWriteOwner(pVM));
890 int rc = PDMCritSectEnter(&pVM->em.s.CritSectREM, VERR_SEM_BUSY);
891 AssertRCSuccess(rc);
892#else
893 RT_NOREF(pVM);
894#endif
895}
896
897
898/**
899 * Unlocks REM execution.
900 *
901 * @param pVM The cross context VM structure.
902 */
903VMMDECL(void) EMRemUnlock(PVM pVM)
904{
905#ifdef VBOX_WITH_REM
906 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
907 return; /* early init */
908
909 PDMCritSectLeave(&pVM->em.s.CritSectREM);
910#else
911 RT_NOREF(pVM);
912#endif
913}
914
915
916/**
917 * Check if this VCPU currently owns the REM lock.
918 *
919 * @returns true if owner, false if not.
920 * @param pVM The cross context VM structure.
921 */
922VMMDECL(bool) EMRemIsLockOwner(PVM pVM)
923{
924#ifdef VBOX_WITH_REM
925 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
926 return true; /* early init */
927
928 return PDMCritSectIsOwner(&pVM->em.s.CritSectREM);
929#else
930 RT_NOREF(pVM);
931 return true;
932#endif
933}
934
935
936/**
937 * Try to acquire the REM lock.
938 *
939 * @returns VBox status code
940 * @param pVM The cross context VM structure.
941 */
942VMM_INT_DECL(int) EMRemTryLock(PVM pVM)
943{
944#ifdef VBOX_WITH_REM
945 if (!PDMCritSectIsInitialized(&pVM->em.s.CritSectREM))
946 return VINF_SUCCESS; /* early init */
947
948 return PDMCritSectTryEnter(&pVM->em.s.CritSectREM);
949#else
950 RT_NOREF(pVM);
951 return VINF_SUCCESS;
952#endif
953}
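/** Usage sketch for the REM locking helpers above (illustrative):
 * @code
 *     EMRemLock(pVM);
 *     // ... access recompiler state ...
 *     EMRemUnlock(pVM);
 * @endcode
 */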
954
955
956/**
957 * @callback_method_impl{FNDISREADBYTES}
958 */
959static DECLCALLBACK(int) emReadBytes(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
960{
961 PVMCPU pVCpu = (PVMCPU)pDis->pvUser;
962#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
963 PVM pVM = pVCpu->CTX_SUFF(pVM);
964#endif
965 RTUINTPTR uSrcAddr = pDis->uInstrAddr + offInstr;
966 int rc;
967
968 /*
969 * Figure how much we can or must read.
970 */
971 size_t cbToRead = PAGE_SIZE - (uSrcAddr & PAGE_OFFSET_MASK);
972 if (cbToRead > cbMaxRead)
973 cbToRead = cbMaxRead;
974 else if (cbToRead < cbMinRead)
975 cbToRead = cbMinRead;
976
977#if defined(VBOX_WITH_RAW_MODE) && (defined(IN_RC) || defined(IN_RING3))
978 /*
979 * We might be called upon to interpret an instruction in a patch.
980 */
981 if (PATMIsPatchGCAddr(pVM, uSrcAddr))
982 {
983# ifdef IN_RC
984 memcpy(&pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
985# else
986 memcpy(&pDis->abInstr[offInstr], PATMR3GCPtrToHCPtr(pVM, uSrcAddr), cbToRead);
987# endif
988 rc = VINF_SUCCESS;
989 }
990 else
991#endif
992 {
993# ifdef IN_RC
994 /*
995 * Try access it thru the shadow page tables first. Fall back on the
996 * slower PGM method if it fails because the TLB or page table was
997 * modified recently.
998 */
999 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
1000 if (rc == VERR_ACCESS_DENIED && cbToRead > cbMinRead)
1001 {
1002 cbToRead = cbMinRead;
1003 rc = MMGCRamRead(pVCpu->pVMRC, &pDis->abInstr[offInstr], (void *)(uintptr_t)uSrcAddr, cbToRead);
1004 }
1005 if (rc == VERR_ACCESS_DENIED)
1006#endif
1007 {
1008 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
1009 if (RT_FAILURE(rc))
1010 {
1011 if (cbToRead > cbMinRead)
1012 {
1013 cbToRead = cbMinRead;
1014 rc = PGMPhysSimpleReadGCPtr(pVCpu, &pDis->abInstr[offInstr], uSrcAddr, cbToRead);
1015 }
1016 if (RT_FAILURE(rc))
1017 {
1018#ifndef IN_RC
1019 /*
1020 * If we fail to find the page via the guest's page tables
1021 * we invalidate the page in the host TLB (pertaining to
1022 * the guest in the NestedPaging case). See @bugref{6043}.
1023 */
1024 if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
1025 {
1026 HMInvalidatePage(pVCpu, uSrcAddr);
1027 if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) != (uSrcAddr >> PAGE_SHIFT))
1028 HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
1029 }
1030#endif
1031 }
1032 }
1033 }
1034 }
1035
1036 pDis->cbCachedInstr = offInstr + (uint8_t)cbToRead;
1037 return rc;
1038}
1039
1040
1041
1042/**
1043 * Disassembles the current instruction.
1044 *
1045 * @returns VBox status code, see SELMToFlatEx and EMInterpretDisasOneEx for
1046 * details.
1047 *
1048 * @param pVM The cross context VM structure.
1049 * @param pVCpu The cross context virtual CPU structure.
1050 * @param pDis Where to return the parsed instruction info.
1051 * @param pcbInstr Where to return the instruction size. (optional)
1052 */
1053VMM_INT_DECL(int) EMInterpretDisasCurrent(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pDis, unsigned *pcbInstr)
1054{
1055 PCPUMCTXCORE pCtxCore = CPUMCTX2CORE(CPUMQueryGuestCtxPtr(pVCpu));
1056 RTGCPTR GCPtrInstr;
1057#if 0
1058 int rc = SELMToFlatEx(pVCpu, DISSELREG_CS, pCtxCore, pCtxCore->rip, 0, &GCPtrInstr);
1059#else
1060/** @todo Get the CPU mode as well while we're at it! */
1061 int rc = SELMValidateAndConvertCSAddr(pVCpu, pCtxCore->eflags, pCtxCore->ss.Sel, pCtxCore->cs.Sel, &pCtxCore->cs,
1062 pCtxCore->rip, &GCPtrInstr);
1063#endif
1064 if (RT_FAILURE(rc))
1065 {
1066 Log(("EMInterpretDisasOne: Failed to convert %RTsel:%RGv (cpl=%d) - rc=%Rrc !!\n",
1067 pCtxCore->cs.Sel, (RTGCPTR)pCtxCore->rip, pCtxCore->ss.Sel & X86_SEL_RPL, rc));
1068 return rc;
1069 }
1070 return EMInterpretDisasOneEx(pVM, pVCpu, (RTGCUINTPTR)GCPtrInstr, pCtxCore, pDis, pcbInstr);
1071}
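/** Example of disassembling at the current RIP (a hedged sketch; error
 * handling trimmed):
 * @code
 *     DISCPUSTATE Dis;
 *     unsigned    cbInstr;
 *     int rc = EMInterpretDisasCurrent(pVM, pVCpu, &Dis, &cbInstr);
 *     if (RT_SUCCESS(rc))
 *         Log(("opcode %#x, %u bytes\n", Dis.pCurInstr->uOpcode, cbInstr));
 * @endcode
 */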
1072
1073
1074/**
1075 * Disassembles one instruction.
1076 *
1077 * This is used internally by the interpreter and by trap/access handlers.
1078 *
1079 * @returns VBox status code.
1080 *
1081 * @param pVM The cross context VM structure.
1082 * @param pVCpu The cross context virtual CPU structure.
1083 * @param GCPtrInstr The flat address of the instruction.
1084 * @param pCtxCore The context core (used to determine the cpu mode).
1085 * @param pDis Where to return the parsed instruction info.
1086 * @param pcbInstr Where to return the instruction size. (optional)
1087 */
1088VMM_INT_DECL(int) EMInterpretDisasOneEx(PVM pVM, PVMCPU pVCpu, RTGCUINTPTR GCPtrInstr, PCCPUMCTXCORE pCtxCore,
1089 PDISCPUSTATE pDis, unsigned *pcbInstr)
1090{
1091 NOREF(pVM);
1092 Assert(pCtxCore == CPUMGetGuestCtxCore(pVCpu)); NOREF(pCtxCore);
1093 DISCPUMODE enmCpuMode = CPUMGetGuestDisMode(pVCpu);
1094 /** @todo Deal with too long instruction (=> \#GP), opcode read errors (=>
1095 * \#PF, \#GP, \#??), undefined opcodes (=> \#UD), and such. */
1096 int rc = DISInstrWithReader(GCPtrInstr, enmCpuMode, emReadBytes, pVCpu, pDis, pcbInstr);
1097 if (RT_SUCCESS(rc))
1098 return VINF_SUCCESS;
1099 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("DISInstrWithReader failed on GCPtrInstr=%RGv rc=%Rrc\n", GCPtrInstr, rc));
1100 return rc;
1101}
1102
1103
1104/**
1105 * Interprets the current instruction.
1106 *
1107 * @returns VBox status code.
1108 * @retval VINF_* Scheduling instructions.
1109 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1110 * @retval VERR_* Fatal errors.
1111 *
1112 * @param pVCpu The cross context virtual CPU structure.
1113 * @param pRegFrame The register frame.
1114 * Updates the EIP if an instruction was executed successfully.
1115 * @param pvFault The fault address (CR2).
1116 *
1117 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1118 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1119 * to worry about e.g. invalid modrm combinations (!)
1120 */
1121VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstruction(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1122{
1123 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1124 LogFlow(("EMInterpretInstruction %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1125 NOREF(pvFault);
1126
1127 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, NULL);
1128 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1129 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1130 rc = VERR_EM_INTERPRETER;
1131 if (rc != VINF_SUCCESS)
1132 Log(("EMInterpretInstruction: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1133
1134 return rc;
1135}
1136
1137
1138/**
1139 * Interprets the current instruction.
1140 *
1141 * @returns VBox status code.
1142 * @retval VINF_* Scheduling instructions.
1143 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1144 * @retval VERR_* Fatal errors.
1145 *
1146 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1147 * @param pRegFrame The register frame.
1148 * Updates the EIP if an instruction was executed successfully.
1149 * @param pvFault The fault address (CR2).
1150 * @param pcbWritten Size of the write (if applicable).
1151 *
1152 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1153 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1154 * to worry about e.g. invalid modrm combinations (!)
1155 */
1156VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionEx(PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, uint32_t *pcbWritten)
1157{
1158 LogFlow(("EMInterpretInstructionEx %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1159 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1160 NOREF(pvFault);
1161
1162 VBOXSTRICTRC rc = IEMExecOneBypassEx(pVCpu, pRegFrame, pcbWritten);
1163 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1164 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1165 rc = VERR_EM_INTERPRETER;
1166 if (rc != VINF_SUCCESS)
1167 Log(("EMInterpretInstructionEx: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1168
1169 return rc;
1170}
1171
1172
1173/**
1174 * Interprets the current instruction using the supplied DISCPUSTATE structure.
1175 *
1176 * IP/EIP/RIP *IS* updated!
1177 *
1178 * @returns VBox strict status code.
1179 * @retval VINF_* Scheduling instructions. When these are returned, it
1180 * starts to get a bit tricky to know whether code was
1181 * executed or not... We'll address this when it becomes a problem.
1182 * @retval VERR_EM_INTERPRETER Something we can't cope with.
1183 * @retval VERR_* Fatal errors.
1184 *
1185 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1186 * @param pDis The disassembler cpu state for the instruction to be
1187 * interpreted.
1188 * @param pRegFrame The register frame. IP/EIP/RIP *IS* changed!
1189 * @param pvFault The fault address (CR2).
1190 * @param enmCodeType Code type (user/supervisor)
1191 *
1192 * @remark Invalid opcode exceptions have a higher priority than GP (see Intel
1193 * Architecture System Developers Manual, Vol 3, 5.5) so we don't need
1194 * to worry about e.g. invalid modrm combinations (!)
1195 *
1196 * @todo At this time we do NOT check if the instruction overwrites vital information.
1197 * Make sure this can't happen!! (will add some assertions/checks later)
1198 */
1199VMM_INT_DECL(VBOXSTRICTRC) EMInterpretInstructionDisasState(PVMCPU pVCpu, PDISCPUSTATE pDis, PCPUMCTXCORE pRegFrame,
1200 RTGCPTR pvFault, EMCODETYPE enmCodeType)
1201{
1202 LogFlow(("EMInterpretInstructionDisasState %RGv fault %RGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
1203 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1204 NOREF(pDis); NOREF(pvFault); NOREF(enmCodeType);
1205
1206 VBOXSTRICTRC rc = IEMExecOneBypassWithPrefetchedByPC(pVCpu, pRegFrame, pRegFrame->rip, pDis->abInstr, pDis->cbCachedInstr);
1207 if (RT_UNLIKELY( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1208 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED))
1209 rc = VERR_EM_INTERPRETER;
1210
1211 if (rc != VINF_SUCCESS)
1212 Log(("EMInterpretInstructionDisasState: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1213
1214 return rc;
1215}
1216
1217#ifdef IN_RC
1218
1219DECLINLINE(int) emRCStackRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCPTR GCPtrSrc, uint32_t cb)
1220{
1221 int rc = MMGCRamRead(pVM, pvDst, (void *)(uintptr_t)GCPtrSrc, cb);
1222 if (RT_LIKELY(rc != VERR_ACCESS_DENIED))
1223 return rc;
1224 return PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pvDst, GCPtrSrc, cb, /*fMayTrap*/ false);
1225}
1226
1227
1228/**
1229 * Interpret IRET (currently only to V86 code) - PATM only.
1230 *
1231 * @returns VBox status code.
1232 * @param pVM The cross context VM structure.
1233 * @param pVCpu The cross context virtual CPU structure.
1234 * @param pRegFrame The register frame.
1235 *
1236 */
1237VMM_INT_DECL(int) EMInterpretIretV86ForPatm(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1238{
1239 RTGCUINTPTR pIretStack = (RTGCUINTPTR)pRegFrame->esp;
1240 RTGCUINTPTR eip, cs, esp, ss, eflags, ds, es, fs, gs, uMask;
1241 int rc;
1242
1243 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1244 Assert(!CPUMIsGuestIn64BitCode(pVCpu));
1245 /** @todo Rainy day: Test what happens when VERR_EM_INTERPRETER is returned by
1246 * this function. Fear that it may guru on us, thus not converted to
1247 * IEM. */
1248
1249 rc = emRCStackRead(pVM, pVCpu, pRegFrame, &eip, (RTGCPTR)pIretStack , 4);
1250 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &cs, (RTGCPTR)(pIretStack + 4), 4);
1251 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &eflags, (RTGCPTR)(pIretStack + 8), 4);
1252 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1253 AssertReturn(eflags & X86_EFL_VM, VERR_EM_INTERPRETER);
1254
1255 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &esp, (RTGCPTR)(pIretStack + 12), 4);
1256 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ss, (RTGCPTR)(pIretStack + 16), 4);
1257 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &es, (RTGCPTR)(pIretStack + 20), 4);
1258 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &ds, (RTGCPTR)(pIretStack + 24), 4);
1259 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &fs, (RTGCPTR)(pIretStack + 28), 4);
1260 rc |= emRCStackRead(pVM, pVCpu, pRegFrame, &gs, (RTGCPTR)(pIretStack + 32), 4);
1261 AssertRCReturn(rc, VERR_EM_INTERPRETER);
1262
1263 pRegFrame->eip = eip & 0xffff;
1264 pRegFrame->cs.Sel = cs;
1265
1266 /* Mask away all reserved bits */
1267 uMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM | X86_EFL_AC | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_ID;
1268 eflags &= uMask;
1269
1270 CPUMRawSetEFlags(pVCpu, eflags);
1271 Assert((pRegFrame->eflags.u32 & (X86_EFL_IF|X86_EFL_IOPL)) == X86_EFL_IF);
1272
1273 pRegFrame->esp = esp;
1274 pRegFrame->ss.Sel = ss;
1275 pRegFrame->ds.Sel = ds;
1276 pRegFrame->es.Sel = es;
1277 pRegFrame->fs.Sel = fs;
1278 pRegFrame->gs.Sel = gs;
1279
1280 return VINF_SUCCESS;
1281}
1282
1283
1284#endif /* IN_RC */
1285
1286
1287
1288/*
1289 *
1290 * Old interpreter primitives used by HM, move/eliminate later.
1291 * Old interpreter primitives used by HM, move/eliminate later.
1292 * Old interpreter primitives used by HM, move/eliminate later.
1293 * Old interpreter primitives used by HM, move/eliminate later.
1294 * Old interpreter primitives used by HM, move/eliminate later.
1295 *
1296 */
1297
1298
1299/**
1300 * Interpret RDPMC.
1301 *
1302 * @returns VBox status code.
1303 * @param pVM The cross context VM structure.
1304 * @param pVCpu The cross context virtual CPU structure.
1305 * @param pRegFrame The register frame.
1306 *
1307 */
1308VMM_INT_DECL(int) EMInterpretRdpmc(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1309{
1310 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1311 uint32_t uCR4 = CPUMGetGuestCR4(pVCpu);
1312
1313 /* If X86_CR4_PCE is not set, then CPL must be zero. */
1314 if ( !(uCR4 & X86_CR4_PCE)
1315 && CPUMGetGuestCPL(pVCpu) != 0)
1316 {
1317 Assert(CPUMGetGuestCR0(pVCpu) & X86_CR0_PE);
1318 return VERR_EM_INTERPRETER; /* genuine #GP */
1319 }
1320
1321 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
1322 pRegFrame->rax = 0;
1323 pRegFrame->rdx = 0;
1324 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
1325 * ecx but see @bugref{3472}! */
1326
1327 NOREF(pVM);
1328 return VINF_SUCCESS;
1329}
1330
1331
1332/**
1333 * MWAIT Emulation.
1334 */
1335VMM_INT_DECL(VBOXSTRICTRC) EMInterpretMWait(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1336{
1337 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1338 uint32_t u32Dummy, u32ExtFeatures, cpl, u32MWaitFeatures;
1339 NOREF(pVM);
1340
1341 /* Get the current privilege level. */
1342 cpl = CPUMGetGuestCPL(pVCpu);
1343 if (cpl != 0)
1344 return VERR_EM_INTERPRETER; /* supervisor only */
1345
1346 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1347 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1348 return VERR_EM_INTERPRETER; /* not supported */
1349
1350 /*
1351 * CPUID.05H.ECX[0] defines support for power management extensions (eax)
1352 * CPUID.05H.ECX[1] defines support for interrupts as break events for mwait even when IF=0
1353 */
1354 CPUMGetGuestCpuId(pVCpu, 5, 0, &u32Dummy, &u32Dummy, &u32MWaitFeatures, &u32Dummy);
1355 if (pRegFrame->ecx > 1)
1356 {
1357 Log(("EMInterpretMWait: unexpected ecx value %x -> recompiler\n", pRegFrame->ecx));
1358 return VERR_EM_INTERPRETER; /* illegal value. */
1359 }
1360
1361 if (pRegFrame->ecx && !(u32MWaitFeatures & X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
1362 {
1363 Log(("EMInterpretMWait: unsupported X86_CPUID_MWAIT_ECX_BREAKIRQIF0 -> recompiler\n"));
1364 return VERR_EM_INTERPRETER; /* illegal value. */
1365 }
1366
1367 return EMMonitorWaitPerform(pVCpu, pRegFrame->rax, pRegFrame->rcx);
1368}
1369
1370
1371/**
1372 * MONITOR Emulation.
1373 */
1374VMM_INT_DECL(int) EMInterpretMonitor(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
1375{
1376 uint32_t u32Dummy, u32ExtFeatures, cpl;
1377 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1378 NOREF(pVM);
1379
1380 if (pRegFrame->ecx != 0)
1381 {
1382 Log(("emInterpretMonitor: unexpected ecx=%x -> recompiler!!\n", pRegFrame->ecx));
1383 return VERR_EM_INTERPRETER; /* illegal value. */
1384 }
1385
1386 /* Get the current privilege level. */
1387 cpl = CPUMGetGuestCPL(pVCpu);
1388 if (cpl != 0)
1389 return VERR_EM_INTERPRETER; /* supervisor only */
1390
1391 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Dummy);
1392 if (!(u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR))
1393 return VERR_EM_INTERPRETER; /* not supported */
1394
1395 EMMonitorWaitPrepare(pVCpu, pRegFrame->rax, pRegFrame->rcx, pRegFrame->rdx, NIL_RTGCPHYS);
1396 return VINF_SUCCESS;
1397}
1398
1399
1400/* VT-x only: */
1401
1402/**
1403 * Interpret DRx write.
1404 *
1405 * @returns VBox status code.
1406 * @param pVM The cross context VM structure.
1407 * @param pVCpu The cross context virtual CPU structure.
1408 * @param pRegFrame The register frame.
1409 * @param DestRegDrx DRx register index (USE_REG_DR*)
1410 * @param SrcRegGen General purpose register index (USE_REG_E**)
1411 *
1412 */
1413VMM_INT_DECL(int) EMInterpretDRxWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegDrx, uint32_t SrcRegGen)
1414{
1415 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1416 uint64_t uNewDrX;
1417 int rc;
1418 NOREF(pVM);
1419
1420 if (CPUMIsGuestIn64BitCode(pVCpu))
1421 rc = DISFetchReg64(pRegFrame, SrcRegGen, &uNewDrX);
1422 else
1423 {
1424 uint32_t val32;
1425 rc = DISFetchReg32(pRegFrame, SrcRegGen, &val32);
1426 uNewDrX = val32;
1427 }
1428
1429 if (RT_SUCCESS(rc))
1430 {
1431 if (DestRegDrx == 6)
1432 {
1433 uNewDrX |= X86_DR6_RA1_MASK;
1434 uNewDrX &= ~X86_DR6_RAZ_MASK;
1435 }
1436 else if (DestRegDrx == 7)
1437 {
1438 uNewDrX |= X86_DR7_RA1_MASK;
1439 uNewDrX &= ~X86_DR7_RAZ_MASK;
1440 }
1441
1442 /** @todo we don't fail if illegal bits are set/cleared for e.g. dr7 */
1443 rc = CPUMSetGuestDRx(pVCpu, DestRegDrx, uNewDrX);
1444 if (RT_SUCCESS(rc))
1445 return rc;
1446 AssertMsgFailed(("CPUMSetGuestDRx %d failed\n", DestRegDrx));
1447 }
1448 return VERR_EM_INTERPRETER;
1449}
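/** Hedged sketch of a MOV-to-DRx intercept handler calling the above (iDrReg
 * and iGReg are assumed to come from the decoded exit qualification):
 * @code
 *     rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
 *                              iDrReg,  // destination DRx, 0..7
 *                              iGReg);  // source GPR index
 * @endcode
 */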
1450
1451
1452/**
1453 * Interpret DRx read.
1454 *
1455 * @returns VBox status code.
1456 * @param pVM The cross context VM structure.
1457 * @param pVCpu The cross context virtual CPU structure.
1458 * @param pRegFrame The register frame.
1459 * @param DestRegGen General purpose register index (USE_REG_E**)
1460 * @param SrcRegDrx DRx register index (USE_REG_DR*)
1461 */
1462VMM_INT_DECL(int) EMInterpretDRxRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t DestRegGen, uint32_t SrcRegDrx)
1463{
1464 uint64_t val64;
1465 Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
1466 NOREF(pVM);
1467
1468 int rc = CPUMGetGuestDRx(pVCpu, SrcRegDrx, &val64);
1469 AssertMsgRCReturn(rc, ("CPUMGetGuestDRx %d failed\n", SrcRegDrx), VERR_EM_INTERPRETER);
1470 if (CPUMIsGuestIn64BitCode(pVCpu))
1471 rc = DISWriteReg64(pRegFrame, DestRegGen, val64);
1472 else
1473 rc = DISWriteReg32(pRegFrame, DestRegGen, (uint32_t)val64);
1474
1475 if (RT_SUCCESS(rc))
1476 return VINF_SUCCESS;
1477
1478 return VERR_EM_INTERPRETER;
1479}
1480