VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@105465

Last change on this file since 105465 was 105465, checked in by vboxsync, 10 months ago

VMM/IEM: Some more TLB tracing related changes. bugref:10727

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 459.9 KB
1/* $Id: IEMAll.cpp 105465 2024-07-24 09:05:40Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed, as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The "IEM_MEM" log group covers most of the memory-related detail logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
112
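/*
 * Illustrative sketch of how code in this log group typically exercises the
 * levels listed above (the variable names are placeholders; the macros are the
 * regular VBox Log/LogFlow/LogN ones compiled under LOG_GROUP_IEM below):
 */
#if 0 /* example only */
    LogFlow(("iemExecExample: enter\n"));                               /* Flow    : basic enter/exit info. */
    Log(("iemExecExample: raising #GP(0)\n"));                          /* Level 1 : exceptions and other major events. */
    Log3(("iemExecExample: rip=%RX64 rflags=%RX64\n", uRip, fRFlags));  /* Level 3 : detailed state info. */
    Log10(("iemExecExample: flushing TLB entry %RGv\n", GCPtrPage));    /* Level 10: TLB activity. */
#endif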
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
177
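/*
 * For orientation, an informal sketch of how the classic x86 exception vectors
 * map onto these classes (an assumption based on the SDM classification rules;
 * the authoritative logic is elsewhere in IEM):
 */
#if 0 /* example only */
static IEMXCPTCLASS iemXcptClassSketch(uint8_t uVector)
{
    switch (uVector)
    {
        case X86_XCPT_PF:                                       return IEMXCPTCLASS_PAGE_FAULT;
        case X86_XCPT_DF:                                       return IEMXCPTCLASS_DOUBLE_FAULT;
        case X86_XCPT_DE: case X86_XCPT_TS: case X86_XCPT_NP:
        case X86_XCPT_SS: case X86_XCPT_GP:                     return IEMXCPTCLASS_CONTRIBUTORY;
        default:                                                return IEMXCPTCLASS_BENIGN; /* #BP, #UD, #NM, ... */
    }
}
#endif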
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
215 * Helper for invalidating the data TLB for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 /** @todo do large page accounting */ \
225 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
226 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
227 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
228 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
229 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
230 } while (0)
231#else
232# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
233#endif
234
235 /*
236 * Process guest breakpoints.
237 */
238#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
239 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
240 { \
241 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
242 { \
243 case X86_DR7_RW_EO: \
244 fExec |= IEM_F_PENDING_BRK_INSTR; \
245 break; \
246 case X86_DR7_RW_WO: \
247 case X86_DR7_RW_RW: \
248 fExec |= IEM_F_PENDING_BRK_DATA; \
249 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
250 break; \
251 case X86_DR7_RW_IO: \
252 fExec |= IEM_F_PENDING_BRK_X86_IO; \
253 break; \
254 } \
255 } \
256 } while (0)
257
258 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
259 if (fGstDr7 & X86_DR7_ENABLED_MASK)
260 {
261/** @todo extract more details here to simplify matching later. */
262#ifdef IEM_WITH_DATA_TLB
263 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
264#endif
265 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
266 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
267 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
268 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
269 }
270
271 /*
272 * Process hypervisor breakpoints.
273 */
274 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
275 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
276 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
277 {
278/** @todo extract more details here to simplify matching later. */
279 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
282 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
283 }
284
285 return fExec;
286}
287
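/*
 * Per the note above this is not meant to be called directly.  A simplified
 * sketch of the kind of guard the inline wrapper (iemCalcExecDbgFlags) is
 * expected to apply before falling back to this slow path -- an assumption for
 * illustration only, the real wrapper may differ:
 */
#if 0 /* example only */
    uint32_t fDbgFlags = 0;
    if (   (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)            /* any guest breakpoint armed? */
        || (DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM)) & X86_DR7_ENABLED_MASK)) /* ... or a hypervisor one? */
        fDbgFlags = iemCalcExecDbgFlagsSlow(pVCpu);
#endif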
288
289/**
290 * Initializes the decoder state.
291 *
292 * iemReInitDecoder is mostly a copy of this function.
293 *
294 * @param pVCpu The cross context virtual CPU structure of the
295 * calling thread.
296 * @param fExecOpts Optional execution flags:
297 * - IEM_F_BYPASS_HANDLERS
298 * - IEM_F_X86_DISREGARD_LOCK
299 */
300DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
301{
302 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
312
313 /* Execution state: */
314 uint32_t fExec;
315 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
316
317 /* Decoder state: */
318 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
319 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
320 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
321 {
322 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
323 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
324 }
325 else
326 {
327 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
328 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
329 }
330 pVCpu->iem.s.fPrefixes = 0;
331 pVCpu->iem.s.uRexReg = 0;
332 pVCpu->iem.s.uRexB = 0;
333 pVCpu->iem.s.uRexIndex = 0;
334 pVCpu->iem.s.idxPrefix = 0;
335 pVCpu->iem.s.uVex3rdReg = 0;
336 pVCpu->iem.s.uVexLength = 0;
337 pVCpu->iem.s.fEvexStuff = 0;
338 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
339#ifdef IEM_WITH_CODE_TLB
340 pVCpu->iem.s.pbInstrBuf = NULL;
341 pVCpu->iem.s.offInstrNextByte = 0;
342 pVCpu->iem.s.offCurInstrStart = 0;
343# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
344 pVCpu->iem.s.offOpcode = 0;
345# endif
346# ifdef VBOX_STRICT
347 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
348 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
349 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
350 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
351# endif
352#else
353 pVCpu->iem.s.offOpcode = 0;
354 pVCpu->iem.s.cbOpcode = 0;
355#endif
356 pVCpu->iem.s.offModRm = 0;
357 pVCpu->iem.s.cActiveMappings = 0;
358 pVCpu->iem.s.iNextMapping = 0;
359 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
360
361#ifdef DBGFTRACE_ENABLED
362 switch (IEM_GET_CPU_MODE(pVCpu))
363 {
364 case IEMMODE_64BIT:
365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
366 break;
367 case IEMMODE_32BIT:
368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
369 break;
370 case IEMMODE_16BIT:
371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
372 break;
373 }
374#endif
375}
376
377
378/**
379 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
380 *
381 * This is mostly a copy of iemInitDecoder.
382 *
383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
384 */
385DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
386{
387 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
396
397 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
398 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
399 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
400
401 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
402 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
403 pVCpu->iem.s.enmEffAddrMode = enmMode;
404 if (enmMode != IEMMODE_64BIT)
405 {
406 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
407 pVCpu->iem.s.enmEffOpSize = enmMode;
408 }
409 else
410 {
411 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
412 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
413 }
414 pVCpu->iem.s.fPrefixes = 0;
415 pVCpu->iem.s.uRexReg = 0;
416 pVCpu->iem.s.uRexB = 0;
417 pVCpu->iem.s.uRexIndex = 0;
418 pVCpu->iem.s.idxPrefix = 0;
419 pVCpu->iem.s.uVex3rdReg = 0;
420 pVCpu->iem.s.uVexLength = 0;
421 pVCpu->iem.s.fEvexStuff = 0;
422 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
423#ifdef IEM_WITH_CODE_TLB
424 if (pVCpu->iem.s.pbInstrBuf)
425 {
426 uint64_t off = (enmMode == IEMMODE_64BIT
427 ? pVCpu->cpum.GstCtx.rip
428 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
429 - pVCpu->iem.s.uInstrBufPc;
430 if (off < pVCpu->iem.s.cbInstrBufTotal)
431 {
432 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
433 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
434 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
435 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
436 else
437 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
438 }
439 else
440 {
441 pVCpu->iem.s.pbInstrBuf = NULL;
442 pVCpu->iem.s.offInstrNextByte = 0;
443 pVCpu->iem.s.offCurInstrStart = 0;
444 pVCpu->iem.s.cbInstrBuf = 0;
445 pVCpu->iem.s.cbInstrBufTotal = 0;
446 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
447 }
448 }
449 else
450 {
451 pVCpu->iem.s.offInstrNextByte = 0;
452 pVCpu->iem.s.offCurInstrStart = 0;
453 pVCpu->iem.s.cbInstrBuf = 0;
454 pVCpu->iem.s.cbInstrBufTotal = 0;
455# ifdef VBOX_STRICT
456 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
457# endif
458 }
459# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
460 pVCpu->iem.s.offOpcode = 0;
461# endif
462#else /* !IEM_WITH_CODE_TLB */
463 pVCpu->iem.s.cbOpcode = 0;
464 pVCpu->iem.s.offOpcode = 0;
465#endif /* !IEM_WITH_CODE_TLB */
466 pVCpu->iem.s.offModRm = 0;
467 Assert(pVCpu->iem.s.cActiveMappings == 0);
468 pVCpu->iem.s.iNextMapping = 0;
469 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
470 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
471
472#ifdef DBGFTRACE_ENABLED
473 switch (enmMode)
474 {
475 case IEMMODE_64BIT:
476 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
477 break;
478 case IEMMODE_32BIT:
479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
480 break;
481 case IEMMODE_16BIT:
482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
483 break;
484 }
485#endif
486}
487
488
489
490/**
491 * Prefetches opcodes the first time, when starting execution.
492 *
493 * @returns Strict VBox status code.
494 * @param pVCpu The cross context virtual CPU structure of the
495 * calling thread.
496 * @param fExecOpts Optional execution flags:
497 * - IEM_F_BYPASS_HANDLERS
498 * - IEM_F_X86_DISREGARD_LOCK
499 */
500static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
501{
502 iemInitDecoder(pVCpu, fExecOpts);
503
504#ifndef IEM_WITH_CODE_TLB
505 /*
506 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
507 *
508 * First translate CS:rIP to a physical address.
509 *
510 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
511 * all relevant bytes from the first page, as it ASSUMES it's only ever
512 * called for dealing with CS.LIM, page crossing and instructions that
513 * are too long.
514 */
515 uint32_t cbToTryRead;
516 RTGCPTR GCPtrPC;
517 if (IEM_IS_64BIT_CODE(pVCpu))
518 {
519 cbToTryRead = GUEST_PAGE_SIZE;
520 GCPtrPC = pVCpu->cpum.GstCtx.rip;
521 if (IEM_IS_CANONICAL(GCPtrPC))
522 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
523 else
524 return iemRaiseGeneralProtectionFault0(pVCpu);
525 }
526 else
527 {
528 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
529 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
530 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
531 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
532 else
533 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
534 if (cbToTryRead) { /* likely */ }
535 else /* overflowed */
536 {
537 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
538 cbToTryRead = UINT32_MAX;
539 }
540 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
541 Assert(GCPtrPC <= UINT32_MAX);
542 }
543
544 PGMPTWALKFAST WalkFast;
545 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
546 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
547 &WalkFast);
548 if (RT_SUCCESS(rc))
549 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
550 else
551 {
552 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
553# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
554/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
555 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
556 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
557 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
558# endif
559 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
560 }
561#if 0
562 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
566# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
567/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
568# error completely wrong
569 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
570 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
571# endif
572 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
573 }
574 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
575 else
576 {
577 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
578# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
579/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
580# error completely wrong.
581 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
582 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
583# endif
584 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
585 }
586#else
587 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
588 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
589#endif
590 RTGCPHYS const GCPhys = WalkFast.GCPhys;
591
592 /*
593 * Read the bytes at this address.
594 */
595 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
596 if (cbToTryRead > cbLeftOnPage)
597 cbToTryRead = cbLeftOnPage;
598 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
599 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
600
601 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
602 {
603 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
604 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
605 { /* likely */ }
606 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
607 {
608 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
609 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
610 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
611 }
612 else
613 {
614 Log((RT_SUCCESS(rcStrict)
615 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
616 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
617 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
618 return rcStrict;
619 }
620 }
621 else
622 {
623 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
624 if (RT_SUCCESS(rc))
625 { /* likely */ }
626 else
627 {
628 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
629 GCPtrPC, GCPhys, cbToTryRead, rc));
630 return rc;
631 }
632 }
633 pVCpu->iem.s.cbOpcode = cbToTryRead;
634#endif /* !IEM_WITH_CODE_TLB */
635 return VINF_SUCCESS;
636}
637
638
639#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
640/**
641 * Helper for doing large page accounting at TLB load time.
642 */
643template<bool const a_fGlobal>
644DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
645{
646 if (a_fGlobal)
647 pTlb->cTlbGlobalLargePageCurLoads++;
648 else
649 pTlb->cTlbNonGlobalLargePageCurLoads++;
650
651 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
652 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
653 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
654 ? &pTlb->GlobalLargePageRange
655 : &pTlb->NonGlobalLargePageRange;
656 uTagNoRev &= ~(RTGCPTR)fMask;
657 if (uTagNoRev < pRange->uFirstTag)
658 pRange->uFirstTag = uTagNoRev;
659
660 uTagNoRev |= fMask;
661 if (uTagNoRev > pRange->uLastTag)
662 pRange->uLastTag = uTagNoRev;
663}
664#endif
665
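/*
 * Worked example of the range bookkeeping above, assuming 4KiB guest pages and
 * 2MiB large pages (f2MbLargePages = true): fMask = (_2M - 1) >> GUEST_PAGE_SHIFT
 * = 0x1ff, so a load with uTagNoRev = 0x12345 widens the tracked range to the
 * whole aligned large-page block, i.e. uFirstTag <= 0x12200 and uLastTag >= 0x123ff.
 * With 4MiB large pages the mask is 0x3ff instead.  The ranges are only ever
 * widened here; they are reset by the TLB flush code below.
 */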
666
667#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
668/**
669 * Worker for iemTlbInvalidateAll.
670 */
671template<bool a_fGlobal>
672DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
673{
674 if (!a_fGlobal)
675 pTlb->cTlsFlushes++;
676 else
677 pTlb->cTlsGlobalFlushes++;
678
679 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
680 if (RT_LIKELY(pTlb->uTlbRevision != 0))
681 { /* very likely */ }
682 else
683 {
684 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
685 pTlb->cTlbRevisionRollovers++;
686 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
687 while (i-- > 0)
688 pTlb->aEntries[i * 2].uTag = 0;
689 }
690
691 pTlb->cTlbNonGlobalLargePageCurLoads = 0;
692 pTlb->NonGlobalLargePageRange.uLastTag = 0;
693 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
694
695 if (a_fGlobal)
696 {
697 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
698 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
699 { /* very likely */ }
700 else
701 {
702 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
703 pTlb->cTlbRevisionRollovers++;
704 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
705 while (i-- > 0)
706 pTlb->aEntries[i * 2 + 1].uTag = 0;
707 }
708
709 pTlb->cTlbGlobalLargePageCurLoads = 0;
710 pTlb->GlobalLargePageRange.uLastTag = 0;
711 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
712 }
713}
714#endif
715
716
717/**
718 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
719 */
720template<bool a_fGlobal>
721DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
722{
723#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
724 Log10(("IEMTlbInvalidateAll\n"));
725
726# ifdef IEM_WITH_CODE_TLB
727 pVCpu->iem.s.cbInstrBufTotal = 0;
728 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
729 if (a_fGlobal)
730 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
731 else
732 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
733# endif
734
735# ifdef IEM_WITH_DATA_TLB
736 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
737 if (a_fGlobal)
738 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
739 else
740 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
741# endif
742#else
743 RT_NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the non-global IEM TLB entries.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVCpu The cross context virtual CPU structure of the calling
754 * thread.
755 */
756VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
757{
758 iemTlbInvalidateAll<false>(pVCpu);
759}
760
761
762/**
763 * Invalidates all the IEM TLB entries.
764 *
765 * This is called internally as well as by PGM when moving GC mappings.
766 *
767 * @param pVCpu The cross context virtual CPU structure of the calling
768 * thread.
769 */
770VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
771{
772 iemTlbInvalidateAll<true>(pVCpu);
773}
774
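/*
 * Illustrative call pattern for the two flush flavours (an assumption for
 * illustration; the real call sites are in PGM and elsewhere): a plain CR3
 * reload only needs the non-global flush, whereas events that also drop global
 * pages (e.g. toggling CR4.PGE) need the global variant.
 */
#if 0 /* example only */
    if (fAlsoFlushGlobalPages)          /* placeholder condition */
        IEMTlbInvalidateAllGlobal(pVCpu);
    else
        IEMTlbInvalidateAll(pVCpu);
#endif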
775
776#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
777
778template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
779DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
780{
781 /* Combine TAG values with the TLB revisions. */
782 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
783 if (a_fNonGlobal)
784 GCPtrTag |= pTlb->uTlbRevision;
785
786 /* Set up the scan. */
787 bool const fPartialScan = IEMTLB_ENTRY_COUNT >= (a_f2MbLargePage ? 512 : 1024);
788 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
789 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + (a_f2MbLargePage ? 512 : 1024) : IEMTLB_ENTRY_COUNT;
790 RTGCPTR const GCPtrTagMask = fPartialScan
791 ? ~(RTGCPTR)0
792 : ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK
793 & ~(RTGCPTR)( ( RT_BIT_64((a_f2MbLargePage ? 9 : 10) - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO)
794 - 1U)
795 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO);
796
797 /*
798 * Do the scanning.
799 */
800 for (idxEven = 0; idxEven < idxEvenEnd; idxEven += 2)
801 {
802 if (a_fNonGlobal)
803 {
804 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag)
805 {
806 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
807 {
808 pTlb->aEntries[idxEven].uTag = 0;
809 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
810 pVCpu->iem.s.cbInstrBufTotal = 0;
811 }
812 }
813 GCPtrTag++;
814 }
815
816 if (a_fGlobal)
817 {
818 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob)
819 {
820 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
821 {
822 pTlb->aEntries[idxEven + 1].uTag = 0;
823 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
824 pVCpu->iem.s.cbInstrBufTotal = 0;
825 }
826 }
827 GCPtrTagGlob++;
828 }
829 }
830
831}
832
833template<bool const a_fDataTlb, bool const a_f2MbLargePage>
834DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
835{
836 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
837
838 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
839 if ( pTlb->GlobalLargePageRange.uFirstTag >= GCPtrTag
840 && pTlb->GlobalLargePageRange.uLastTag <= GCPtrTag)
841 {
842 if ( pTlb->NonGlobalLargePageRange.uFirstTag < GCPtrTag
843 || pTlb->NonGlobalLargePageRange.uLastTag > GCPtrTag)
844 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
845 else
846 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
847 }
848 else if ( pTlb->NonGlobalLargePageRange.uFirstTag < GCPtrTag
849 || pTlb->NonGlobalLargePageRange.uLastTag > GCPtrTag)
850 { /* Large pages aren't as likely in the non-global TLB half. */ }
851 else
852 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
853}
854
855template<bool const a_fDataTlb>
856DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven)
857{
858 /*
859 * Flush the entry pair.
860 */
861 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
862 {
863 pTlb->aEntries[idxEven].uTag = 0;
864 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
865 pVCpu->iem.s.cbInstrBufTotal = 0;
866 }
867 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
868 {
869 pTlb->aEntries[idxEven + 1].uTag = 0;
870 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
871 pVCpu->iem.s.cbInstrBufTotal = 0;
872 }
873
874 /*
875 * If there are (or has been) large pages in the TLB, we must check if the
876 * address being flushed may involve one of those, as then we'd have to
877 * scan for entries relating to the same page and flush those as well.
878 */
879# if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */
880 if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
881# else
882 if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
883# endif
884 {
885 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
886 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
887 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
888 else
889 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
890 }
891}
892
893#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */
894
895/**
896 * Invalidates a page in the TLBs.
897 *
898 * @param pVCpu The cross context virtual CPU structure of the calling
899 * thread.
900 * @param GCPtr The address of the page to invalidate
901 * @thread EMT(pVCpu)
902 */
903VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
904{
905 IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
906#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
907 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
908 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
909 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
910 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
911
912# ifdef IEM_WITH_CODE_TLB
913 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
914# endif
915# ifdef IEM_WITH_DATA_TLB
916 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
917# endif
918#else
919 NOREF(pVCpu); NOREF(GCPtr);
920#endif
921}
922
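/*
 * Worked example of the even/odd entry pairing used above: the no-revision tag
 * is simply the page frame number (see the AssertCompile on the tag macro
 * earlier), so for GCPtr 0x7f1234567000 the tag is 0x7f1234567 and
 * IEMTLB_TAG_TO_EVEN_INDEX() selects the entry pair where aEntries[idxEven]
 * holds the non-global copy (tag | uTlbRevision) and aEntries[idxEven + 1] the
 * global copy (tag | uTlbRevisionGlobal).  A typical call site (placeholder
 * variable name):
 */
#if 0 /* example only */
    IEMTlbInvalidatePage(pVCpu, GCPtrInvlpgOperand); /* e.g. after emulating INVLPG */
#endif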
923
924#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
925/**
926 * Invalidates both TLBs in a slow fashion following a rollover.
927 *
928 * Worker for IEMTlbInvalidateAllPhysical,
929 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
930 * iemMemMapJmp and others.
931 *
932 * @thread EMT(pVCpu)
933 */
934static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
935{
936 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
937 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
938 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
939
940 unsigned i;
941# ifdef IEM_WITH_CODE_TLB
942 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
943 while (i-- > 0)
944 {
945 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
946 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
947 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
948 }
949 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
950 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
951# endif
952# ifdef IEM_WITH_DATA_TLB
953 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
954 while (i-- > 0)
955 {
956 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
957 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
958 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
959 }
960 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
961 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
962# endif
963
964}
965#endif
966
967
968/**
969 * Invalidates the host physical aspects of the IEM TLBs.
970 *
971 * This is called internally as well as by PGM when moving GC mappings.
972 *
973 * @param pVCpu The cross context virtual CPU structure of the calling
974 * thread.
975 * @note Currently not used.
976 */
977VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
978{
979#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
980 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
981 Log10(("IEMTlbInvalidateAllPhysical\n"));
982
983# ifdef IEM_WITH_CODE_TLB
984 pVCpu->iem.s.cbInstrBufTotal = 0;
985# endif
986 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
987 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
988 {
989 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
990 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
991 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
992 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
993 }
994 else
995 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
996#else
997 NOREF(pVCpu);
998#endif
999}
1000
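/*
 * The physical revision is consumed lazily: each TLB entry carries its revision
 * in the IEMTLBE_F_PHYS_REV bits of fFlagsAndPhysRev, and a lookup only trusts
 * the cached mapping/flags when those bits match the current uTlbPhysRev (the
 * code TLB fetch path further down does exactly this).  Bumping the revision
 * here therefore invalidates all host-physical info at once, and only a
 * revision rollover forces the slow sweep above.  Condensed sketch of the
 * consumer side:
 */
#if 0 /* example only */
    if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
    { /* cached pbMappingR3 and access flags are still valid */ }
    else
    { /* stale: re-query PGM via PGMPhysIemGCPhys2PtrNoLock and refresh the flags */ }
#endif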
1001
1002/**
1003 * Invalidates the host physical aspects of the IEM TLBs.
1004 *
1005 * This is called internally as well as by PGM when moving GC mappings.
1006 *
1007 * @param pVM The cross context VM structure.
1008 * @param idCpuCaller The ID of the calling EMT if available to the caller,
1009 * otherwise NIL_VMCPUID.
1010 * @param enmReason The reason we're called.
1011 *
1012 * @remarks Caller holds the PGM lock.
1013 */
1014VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
1015{
1016#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1017 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
1018 if (pVCpuCaller)
1019 VMCPU_ASSERT_EMT(pVCpuCaller);
1020 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
1021
1022 VMCC_FOR_EACH_VMCPU(pVM)
1023 {
1024# ifdef IEM_WITH_CODE_TLB
1025 if (pVCpuCaller == pVCpu)
1026 pVCpu->iem.s.cbInstrBufTotal = 0;
1027# endif
1028
1029 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
1030 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
1031 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
1032 { /* likely */}
1033 else if (pVCpuCaller != pVCpu)
1034 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
1035 else
1036 {
1037 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1038 continue;
1039 }
1040 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1041 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1042
1043 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1044 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1045 }
1046 VMCC_FOR_EACH_VMCPU_END(pVM);
1047
1048#else
1049 RT_NOREF(pVM, idCpuCaller, enmReason);
1050#endif
1051}
1052
1053
1054/**
1055 * Flushes the prefetch buffer, light version.
1056 */
1057void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
1058{
1059#ifndef IEM_WITH_CODE_TLB
1060 pVCpu->iem.s.cbOpcode = cbInstr;
1061#else
1062 RT_NOREF(pVCpu, cbInstr);
1063#endif
1064}
1065
1066
1067/**
1068 * Flushes the prefetch buffer, heavy version.
1069 */
1070void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
1071{
1072#ifndef IEM_WITH_CODE_TLB
1073 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
1074#elif 1
1075 pVCpu->iem.s.cbInstrBufTotal = 0;
1076 RT_NOREF(cbInstr);
1077#else
1078 RT_NOREF(pVCpu, cbInstr);
1079#endif
1080}
1081
1082
1083
1084#ifdef IEM_WITH_CODE_TLB
1085
1086/**
1087 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1088 * failure and jumps.
1089 *
1090 * We end up here for a number of reasons:
1091 * - pbInstrBuf isn't yet initialized.
1092 * - Advancing beyond the buffer boundary (e.g. cross page).
1093 * - Advancing beyond the CS segment limit.
1094 * - Fetching from non-mappable page (e.g. MMIO).
1095 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
1096 *
1097 * @param pVCpu The cross context virtual CPU structure of the
1098 * calling thread.
1099 * @param pvDst Where to return the bytes.
1100 * @param cbDst Number of bytes to read. A value of zero is
1101 * allowed for initializing pbInstrBuf (the
1102 * recompiler does this). In this case it is best
1103 * to set pbInstrBuf to NULL prior to the call.
1104 */
1105void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
1106{
1107# ifdef IN_RING3
1108 for (;;)
1109 {
1110 Assert(cbDst <= 8);
1111 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1112
1113 /*
1114 * We might have a partial buffer match, deal with that first to make the
1115 * rest simpler. This is the first part of the cross page/buffer case.
1116 */
1117 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1118 if (pbInstrBuf != NULL)
1119 {
1120 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
1121 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1122 if (offBuf < cbInstrBuf)
1123 {
1124 Assert(offBuf + cbDst > cbInstrBuf);
1125 uint32_t const cbCopy = cbInstrBuf - offBuf;
1126 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
1127
1128 cbDst -= cbCopy;
1129 pvDst = (uint8_t *)pvDst + cbCopy;
1130 offBuf += cbCopy;
1131 }
1132 }
1133
1134 /*
1135 * Check segment limit, figuring how much we're allowed to access at this point.
1136 *
1137 * We will fault immediately if RIP is past the segment limit / in non-canonical
1138 * territory. If we do continue, there are one or more bytes to read before we
1139 * end up in trouble and we need to do that first before faulting.
1140 */
1141 RTGCPTR GCPtrFirst;
1142 uint32_t cbMaxRead;
1143 if (IEM_IS_64BIT_CODE(pVCpu))
1144 {
1145 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1146 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1147 { /* likely */ }
1148 else
1149 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1150 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1151 }
1152 else
1153 {
1154 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1155 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1156 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1157 { /* likely */ }
1158 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1159 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1160 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1161 if (cbMaxRead != 0)
1162 { /* likely */ }
1163 else
1164 {
1165 /* Overflowed because address is 0 and limit is max. */
1166 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1167 cbMaxRead = X86_PAGE_SIZE;
1168 }
1169 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1170 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1171 if (cbMaxRead2 < cbMaxRead)
1172 cbMaxRead = cbMaxRead2;
1173 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1174 }
1175
1176 /*
1177 * Get the TLB entry for this piece of code.
1178 */
1179 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1180 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1181 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1182 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1183 {
1184 /* likely when executing lots of code, otherwise unlikely */
1185# ifdef IEM_WITH_TLB_STATISTICS
1186 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1187# endif
1188 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1189
1190 /* Check TLB page table level access flags. */
1191 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1192 {
1193 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1194 {
1195 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1196 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1197 }
1198 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1199 {
1200 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1201 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1202 }
1203 }
1204
1205 /* Look up the physical page info if necessary. */
1206 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1207 { /* not necessary */ }
1208 else
1209 {
1210 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1211 { /* likely */ }
1212 else
1213 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1214 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1215 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1216 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1217 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1218 }
1219 }
1220 else
1221 {
1222 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1223
1224 /* This page table walking will set A bits as required by the access while performing the walk.
1225 ASSUMES these are set when the address is translated rather than on commit... */
1226 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1227 PGMPTWALKFAST WalkFast;
1228 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1229 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1230 &WalkFast);
1231 if (RT_SUCCESS(rc))
1232 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1233 else
1234 {
1235#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1236 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1237 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1238#endif
1239 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1240 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1241 }
1242
1243 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1244 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1245 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1246 {
1247 pTlbe--;
1248 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1249 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1250 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1251 IEMTLBTRACE_LOAD(pVCpu, GCPtrFirst, false);
1252 }
1253 else
1254 {
1255 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1256 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1257 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1258 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1259 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, false);
1260 }
1261 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1262 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1263 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1264 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1265 pTlbe->GCPhys = GCPhysPg;
1266 pTlbe->pbMappingR3 = NULL;
1267 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1268 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1269 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1270
1271 /* Resolve the physical address. */
1272 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1273 { /* likely */ }
1274 else
1275 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1276 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1277 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1278 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1279 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1280 }
1281
1282# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1283 /*
1284 * Try to do a direct read using the pbMappingR3 pointer.
1285 * Note! Do not recheck the physical TLB revision number here as we have the
1286 * wrong response to changes in the else case. If someone is updating
1287 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1288 * pretending we always won the race.
1289 */
1290 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1291 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1292 {
1293 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1294 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1295 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1296 {
1297 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1298 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1299 }
1300 else
1301 {
1302 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1303 if (cbInstr + (uint32_t)cbDst <= 15)
1304 {
1305 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1306 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1307 }
1308 else
1309 {
1310 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1311 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1312 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1313 }
1314 }
1315 if (cbDst <= cbMaxRead)
1316 {
1317 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1318 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1319
1320 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1321 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1322 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1323 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1324 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1325 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1326 else
1327 Assert(!pvDst);
1328 return;
1329 }
1330 pVCpu->iem.s.pbInstrBuf = NULL;
1331
1332 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1333 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1334 }
1335# else
1336# error "refactor as needed"
1337 /*
1338 * If there is no special read handling, we can read a bit more and
1339 * put it in the prefetch buffer.
1340 */
1341 if ( cbDst < cbMaxRead
1342 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1343 {
1344 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1345 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1346 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1347 { /* likely */ }
1348 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1349 {
1350 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1351 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1352 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1353 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICRC_VAL(rcStrict)));
1354 }
1355 else
1356 {
1357 Log((RT_SUCCESS(rcStrict)
1358 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1359 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1360 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1361 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1362 }
1363 }
1364# endif
1365 /*
1366 * Special read handling, so only read exactly what's needed.
1367 * This is a highly unlikely scenario.
1368 */
1369 else
1370 {
1371 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1372
1373 /* Check instruction length. */
1374 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1375 if (RT_LIKELY(cbInstr + cbDst <= 15))
1376 { /* likely */ }
1377 else
1378 {
1379 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1380 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1381 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1382 }
1383
1384 /* Do the reading. */
1385 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1386 if (cbToRead > 0)
1387 {
1388 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1389 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1390 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1391 { /* likely */ }
1392 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1393 {
1394 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1395 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1396 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1397 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1398 }
1399 else
1400 {
1401 Log((RT_SUCCESS(rcStrict)
1402 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1403 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1404 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1405 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1406 }
1407 }
1408
1409 /* Update the state and probably return. */
1410 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1411 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1412 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1413
1414 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1415 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1416 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1417 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1418 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1419 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1420 pVCpu->iem.s.pbInstrBuf = NULL;
1421 if (cbToRead == cbDst)
1422 return;
1423 Assert(cbToRead == cbMaxRead);
1424 }
1425
1426 /*
1427 * More to read, loop.
1428 */
1429 cbDst -= cbMaxRead;
1430 pvDst = (uint8_t *)pvDst + cbMaxRead;
1431 }
1432# else /* !IN_RING3 */
1433 RT_NOREF(pvDst, cbDst);
1434 if (pvDst || cbDst)
1435 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1436# endif /* !IN_RING3 */
1437}
1438
1439#else /* !IEM_WITH_CODE_TLB */
1440
1441/**
1442 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1443 * exception if it fails.
1444 *
1445 * @returns Strict VBox status code.
1446 * @param pVCpu The cross context virtual CPU structure of the
1447 * calling thread.
1448 * @param cbMin The minimum number of bytes relative to offOpcode
1449 * that must be read.
1450 */
1451VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1452{
1453 /*
1454 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1455 *
1456 * First translate CS:rIP to a physical address.
1457 */
1458 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1459 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1460 uint8_t const cbLeft = cbOpcode - offOpcode;
1461 Assert(cbLeft < cbMin);
1462 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1463
1464 uint32_t cbToTryRead;
1465 RTGCPTR GCPtrNext;
1466 if (IEM_IS_64BIT_CODE(pVCpu))
1467 {
1468 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1469 if (!IEM_IS_CANONICAL(GCPtrNext))
1470 return iemRaiseGeneralProtectionFault0(pVCpu);
1471 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1472 }
1473 else
1474 {
1475 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1476 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1477 GCPtrNext32 += cbOpcode;
1478 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1479 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1480 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1481 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1482 if (!cbToTryRead) /* overflowed */
1483 {
1484 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1485 cbToTryRead = UINT32_MAX;
1486 /** @todo check out wrapping around the code segment. */
1487 }
1488 if (cbToTryRead < cbMin - cbLeft)
1489 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1490 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1491
1492 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1493 if (cbToTryRead > cbLeftOnPage)
1494 cbToTryRead = cbLeftOnPage;
1495 }
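    /* Example for illustration: with 4 KiB guest pages and GCPtrNext = 0x00402ffa,
       GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK) = 0x1000 - 0xffa = 6,
       so at most 6 more opcode bytes can be read before crossing into the next
       page and cbToTryRead is clamped accordingly. */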
1496
1497 /* Restrict to opcode buffer space.
1498
1499 We're making ASSUMPTIONS here based on work done previously in
1500 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1501 be fetched in case of an instruction crossing two pages. */
1502 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1503 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1504 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1505 { /* likely */ }
1506 else
1507 {
1508 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1509 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1510 return iemRaiseGeneralProtectionFault0(pVCpu);
1511 }
1512
1513 PGMPTWALKFAST WalkFast;
1514 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1515 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1516 &WalkFast);
1517 if (RT_SUCCESS(rc))
1518 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1519 else
1520 {
1521 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1522#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1523 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1524 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1525#endif
1526 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1527 }
1528 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1529 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1530
1531 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1532 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1533
1534 /*
1535 * Read the bytes at this address.
1536 *
1537 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1538 * and since PATM should only patch the start of an instruction there
1539 * should be no need to check again here.
1540 */
1541 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1542 {
1543 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1544 cbToTryRead, PGMACCESSORIGIN_IEM);
1545 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1546 { /* likely */ }
1547 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1548 {
1549 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1550 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1551 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1552 }
1553 else
1554 {
1555 Log((RT_SUCCESS(rcStrict)
1556 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1557 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1558 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1559 return rcStrict;
1560 }
1561 }
1562 else
1563 {
1564 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1565 if (RT_SUCCESS(rc))
1566 { /* likely */ }
1567 else
1568 {
1569 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1570 return rc;
1571 }
1572 }
1573 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1574 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1575
1576 return VINF_SUCCESS;
1577}
1578
1579#endif /* !IEM_WITH_CODE_TLB */
1580#ifndef IEM_WITH_SETJMP
1581
1582/**
1583 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1584 *
1585 * @returns Strict VBox status code.
1586 * @param pVCpu The cross context virtual CPU structure of the
1587 * calling thread.
1588 * @param pb Where to return the opcode byte.
1589 */
1590VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1591{
1592 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1593 if (rcStrict == VINF_SUCCESS)
1594 {
1595 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1596 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1597 pVCpu->iem.s.offOpcode = offOpcode + 1;
1598 }
1599 else
1600 *pb = 0;
1601 return rcStrict;
1602}
1603
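/*
 * Minimal usage sketch (hypothetical caller, not code from this file): the inline
 * opcode getters are expected to fall back to this slow path once the buffered
 * opcode bytes run out.
 *
 *     uint8_t      bOpcode;
 *     VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8Slow(pVCpu, &bOpcode);
 *     if (rcStrict2 != VINF_SUCCESS)
 *         return rcStrict2;
 *     bOpcode now holds the next opcode byte and offOpcode has been advanced by one.
 */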
1604#else /* IEM_WITH_SETJMP */
1605
1606/**
1607 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1608 *
1609 * @returns The opcode byte.
1610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1611 */
1612uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1613{
1614# ifdef IEM_WITH_CODE_TLB
1615 uint8_t u8;
1616 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1617 return u8;
1618# else
1619 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1620 if (rcStrict == VINF_SUCCESS)
1621 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1622 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1623# endif
1624}
1625
1626#endif /* IEM_WITH_SETJMP */
1627
1628#ifndef IEM_WITH_SETJMP
1629
1630/**
1631 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1632 *
1633 * @returns Strict VBox status code.
1634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1635 * @param pu16 Where to return the opcode word (sign-extended byte).
1636 */
1637VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1638{
1639 uint8_t u8;
1640 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1641 if (rcStrict == VINF_SUCCESS)
1642 *pu16 = (int8_t)u8;
1643 return rcStrict;
1644}
1645
1646
1647/**
1648 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1649 *
1650 * @returns Strict VBox status code.
1651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1652 * @param pu32 Where to return the opcode dword.
1653 */
1654VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1655{
1656 uint8_t u8;
1657 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1658 if (rcStrict == VINF_SUCCESS)
1659 *pu32 = (int8_t)u8;
1660 return rcStrict;
1661}
1662
1663
1664/**
1665 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1666 *
1667 * @returns Strict VBox status code.
1668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1669 * @param pu64 Where to return the opcode qword.
1670 */
1671VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1672{
1673 uint8_t u8;
1674 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1675 if (rcStrict == VINF_SUCCESS)
1676 *pu64 = (int8_t)u8;
1677 return rcStrict;
1678}
1679
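/*
 * Illustration of the sign extension performed by the S8Sx helpers above: an
 * opcode byte of 0xfe is read as (int8_t)-2 and stored as 0xfffe (U16),
 * 0xfffffffe (U32) or 0xfffffffffffffffe (U64) respectively.
 */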
1680#endif /* !IEM_WITH_SETJMP */
1681
1682
1683#ifndef IEM_WITH_SETJMP
1684
1685/**
1686 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1687 *
1688 * @returns Strict VBox status code.
1689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1690 * @param pu16 Where to return the opcode word.
1691 */
1692VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1693{
1694 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1695 if (rcStrict == VINF_SUCCESS)
1696 {
1697 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1698# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1699 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1700# else
1701 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1702# endif
1703 pVCpu->iem.s.offOpcode = offOpcode + 2;
1704 }
1705 else
1706 *pu16 = 0;
1707 return rcStrict;
1708}
1709
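/*
 * Illustration: for the opcode byte sequence 0x34 0x12 at offOpcode, the
 * RT_MAKE_U16 path above produces 0x1234 (x86 immediates are little endian);
 * the IEM_USE_UNALIGNED_DATA_ACCESS path yields the same value on a
 * little-endian host.
 */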
1710#else /* IEM_WITH_SETJMP */
1711
1712/**
1713 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1714 *
1715 * @returns The opcode word.
1716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1717 */
1718uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1719{
1720# ifdef IEM_WITH_CODE_TLB
1721 uint16_t u16;
1722 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1723 return u16;
1724# else
1725 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1726 if (rcStrict == VINF_SUCCESS)
1727 {
1728 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1729 pVCpu->iem.s.offOpcode += 2;
1730# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1731 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1732# else
1733 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1734# endif
1735 }
1736 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1737# endif
1738}
1739
1740#endif /* IEM_WITH_SETJMP */
1741
1742#ifndef IEM_WITH_SETJMP
1743
1744/**
1745 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1746 *
1747 * @returns Strict VBox status code.
1748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1749 * @param pu32 Where to return the opcode double word.
1750 */
1751VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1752{
1753 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1754 if (rcStrict == VINF_SUCCESS)
1755 {
1756 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1757 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1758 pVCpu->iem.s.offOpcode = offOpcode + 2;
1759 }
1760 else
1761 *pu32 = 0;
1762 return rcStrict;
1763}
1764
1765
1766/**
1767 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1768 *
1769 * @returns Strict VBox status code.
1770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1771 * @param pu64 Where to return the opcode quad word.
1772 */
1773VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1774{
1775 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1776 if (rcStrict == VINF_SUCCESS)
1777 {
1778 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1779 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1780 pVCpu->iem.s.offOpcode = offOpcode + 2;
1781 }
1782 else
1783 *pu64 = 0;
1784 return rcStrict;
1785}
1786
1787#endif /* !IEM_WITH_SETJMP */
1788
1789#ifndef IEM_WITH_SETJMP
1790
1791/**
1792 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1793 *
1794 * @returns Strict VBox status code.
1795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1796 * @param pu32 Where to return the opcode dword.
1797 */
1798VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1799{
1800 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1801 if (rcStrict == VINF_SUCCESS)
1802 {
1803 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1804# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1805 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1806# else
1807 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1808 pVCpu->iem.s.abOpcode[offOpcode + 1],
1809 pVCpu->iem.s.abOpcode[offOpcode + 2],
1810 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1811# endif
1812 pVCpu->iem.s.offOpcode = offOpcode + 4;
1813 }
1814 else
1815 *pu32 = 0;
1816 return rcStrict;
1817}
1818
1819#else /* IEM_WITH_SETJMP */
1820
1821/**
1822 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1823 *
1824 * @returns The opcode dword.
1825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1826 */
1827uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1828{
1829# ifdef IEM_WITH_CODE_TLB
1830 uint32_t u32;
1831 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1832 return u32;
1833# else
1834 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1835 if (rcStrict == VINF_SUCCESS)
1836 {
1837 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1838 pVCpu->iem.s.offOpcode = offOpcode + 4;
1839# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1840 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1841# else
1842 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1843 pVCpu->iem.s.abOpcode[offOpcode + 1],
1844 pVCpu->iem.s.abOpcode[offOpcode + 2],
1845 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1846# endif
1847 }
1848 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1849# endif
1850}
1851
1852#endif /* IEM_WITH_SETJMP */
1853
1854#ifndef IEM_WITH_SETJMP
1855
1856/**
1857 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1858 *
1859 * @returns Strict VBox status code.
1860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1861 * @param pu64 Where to return the opcode qword (zero-extended dword).
1862 */
1863VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1864{
1865 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1866 if (rcStrict == VINF_SUCCESS)
1867 {
1868 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1869 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1870 pVCpu->iem.s.abOpcode[offOpcode + 1],
1871 pVCpu->iem.s.abOpcode[offOpcode + 2],
1872 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1873 pVCpu->iem.s.offOpcode = offOpcode + 4;
1874 }
1875 else
1876 *pu64 = 0;
1877 return rcStrict;
1878}
1879
1880
1881/**
1882 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1883 *
1884 * @returns Strict VBox status code.
1885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1886 * @param pu64 Where to return the opcode qword.
1887 */
1888VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1889{
1890 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1891 if (rcStrict == VINF_SUCCESS)
1892 {
1893 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1894 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1895 pVCpu->iem.s.abOpcode[offOpcode + 1],
1896 pVCpu->iem.s.abOpcode[offOpcode + 2],
1897 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1898 pVCpu->iem.s.offOpcode = offOpcode + 4;
1899 }
1900 else
1901 *pu64 = 0;
1902 return rcStrict;
1903}
1904
1905#endif /* !IEM_WITH_SETJMP */
1906
1907#ifndef IEM_WITH_SETJMP
1908
1909/**
1910 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1911 *
1912 * @returns Strict VBox status code.
1913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1914 * @param pu64 Where to return the opcode qword.
1915 */
1916VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1917{
1918 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1919 if (rcStrict == VINF_SUCCESS)
1920 {
1921 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1922# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1923 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1924# else
1925 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1926 pVCpu->iem.s.abOpcode[offOpcode + 1],
1927 pVCpu->iem.s.abOpcode[offOpcode + 2],
1928 pVCpu->iem.s.abOpcode[offOpcode + 3],
1929 pVCpu->iem.s.abOpcode[offOpcode + 4],
1930 pVCpu->iem.s.abOpcode[offOpcode + 5],
1931 pVCpu->iem.s.abOpcode[offOpcode + 6],
1932 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1933# endif
1934 pVCpu->iem.s.offOpcode = offOpcode + 8;
1935 }
1936 else
1937 *pu64 = 0;
1938 return rcStrict;
1939}
1940
1941#else /* IEM_WITH_SETJMP */
1942
1943/**
1944 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1945 *
1946 * @returns The opcode qword.
1947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1948 */
1949uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1950{
1951# ifdef IEM_WITH_CODE_TLB
1952 uint64_t u64;
1953 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1954 return u64;
1955# else
1956 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1957 if (rcStrict == VINF_SUCCESS)
1958 {
1959 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1960 pVCpu->iem.s.offOpcode = offOpcode + 8;
1961# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1962 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1963# else
1964 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1965 pVCpu->iem.s.abOpcode[offOpcode + 1],
1966 pVCpu->iem.s.abOpcode[offOpcode + 2],
1967 pVCpu->iem.s.abOpcode[offOpcode + 3],
1968 pVCpu->iem.s.abOpcode[offOpcode + 4],
1969 pVCpu->iem.s.abOpcode[offOpcode + 5],
1970 pVCpu->iem.s.abOpcode[offOpcode + 6],
1971 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1972# endif
1973 }
1974 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1975# endif
1976}
1977
1978#endif /* IEM_WITH_SETJMP */
1979
1980
1981
1982/** @name Misc Worker Functions.
1983 * @{
1984 */
1985
1986/**
1987 * Gets the exception class for the specified exception vector.
1988 *
1989 * @returns The class of the specified exception.
1990 * @param uVector The exception vector.
1991 */
1992static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1993{
1994 Assert(uVector <= X86_XCPT_LAST);
1995 switch (uVector)
1996 {
1997 case X86_XCPT_DE:
1998 case X86_XCPT_TS:
1999 case X86_XCPT_NP:
2000 case X86_XCPT_SS:
2001 case X86_XCPT_GP:
2002 case X86_XCPT_SX: /* AMD only */
2003 return IEMXCPTCLASS_CONTRIBUTORY;
2004
2005 case X86_XCPT_PF:
2006 case X86_XCPT_VE: /* Intel only */
2007 return IEMXCPTCLASS_PAGE_FAULT;
2008
2009 case X86_XCPT_DF:
2010 return IEMXCPTCLASS_DOUBLE_FAULT;
2011 }
2012 return IEMXCPTCLASS_BENIGN;
2013}
2014
2015
2016/**
2017 * Evaluates how to handle an exception caused during delivery of another event
2018 * (exception / interrupt).
2019 *
2020 * @returns How to handle the recursive exception.
2021 * @param pVCpu The cross context virtual CPU structure of the
2022 * calling thread.
2023 * @param fPrevFlags The flags of the previous event.
2024 * @param uPrevVector The vector of the previous event.
2025 * @param fCurFlags The flags of the current exception.
2026 * @param uCurVector The vector of the current exception.
2027 * @param pfXcptRaiseInfo Where to store additional information about the
2028 * exception condition. Optional.
2029 */
2030VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
2031 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
2032{
2033 /*
2034 * Only CPU exceptions can be raised while delivering other events; software interrupt
2035 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
2036 */
2037 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
2038 Assert(pVCpu); RT_NOREF(pVCpu);
2039 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
2040
2041 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
2042 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
2043 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2044 {
2045 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
2046 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
2047 {
2048 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
2049 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
2050 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
2051 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
2052 {
2053 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2054 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
2055 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
2056 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
2057 uCurVector, pVCpu->cpum.GstCtx.cr2));
2058 }
2059 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2060 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
2061 {
2062 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2063 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
2064 }
2065 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
2066 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2067 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
2068 {
2069 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
2070 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
2071 }
2072 }
2073 else
2074 {
2075 if (uPrevVector == X86_XCPT_NMI)
2076 {
2077 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
2078 if (uCurVector == X86_XCPT_PF)
2079 {
2080 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
2081 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
2082 }
2083 }
2084 else if ( uPrevVector == X86_XCPT_AC
2085 && uCurVector == X86_XCPT_AC)
2086 {
2087 enmRaise = IEMXCPTRAISE_CPU_HANG;
2088 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
2089 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
2090 }
2091 }
2092 }
2093 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
2094 {
2095 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
2096 if (uCurVector == X86_XCPT_PF)
2097 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
2098 }
2099 else
2100 {
2101 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
2102 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
2103 }
2104
2105 if (pfXcptRaiseInfo)
2106 *pfXcptRaiseInfo = fRaiseInfo;
2107 return enmRaise;
2108}
2109
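/*
 * Some concrete outcomes of the classification above, for illustration:
 *   - #PF while delivering a #PF        -> IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_PF.
 *   - #GP while delivering a #PF        -> IEMXCPTRAISE_DOUBLE_FAULT with IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT.
 *   - #NP while delivering a #GP        -> IEMXCPTRAISE_DOUBLE_FAULT (contributory after contributory).
 *   - #GP while delivering a #DF        -> IEMXCPTRAISE_TRIPLE_FAULT.
 *   - #PF while delivering an ext. INT  -> IEMXCPTRAISE_CURRENT_XCPT with IEMXCPTRAISEINFO_EXT_INT_XCPT
 *                                          and IEMXCPTRAISEINFO_EXT_INT_PF set.
 */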
2110
2111/**
2112 * Enters the CPU shutdown state initiated by a triple fault or other
2113 * unrecoverable conditions.
2114 *
2115 * @returns Strict VBox status code.
2116 * @param pVCpu The cross context virtual CPU structure of the
2117 * calling thread.
2118 */
2119static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
2120{
2121 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2122 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2123
2124 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
2125 {
2126 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
2127 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2128 }
2129
2130 RT_NOREF(pVCpu);
2131 return VINF_EM_TRIPLE_FAULT;
2132}
2133
2134
2135/**
2136 * Validates a new SS segment.
2137 *
2138 * @returns VBox strict status code.
2139 * @param pVCpu The cross context virtual CPU structure of the
2140 * calling thread.
2141 * @param NewSS The new SS selector.
2142 * @param uCpl The CPL to load the stack for.
2143 * @param pDesc Where to return the descriptor.
2144 */
2145static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
2146{
2147 /* Null selectors are not allowed (we're not called for dispatching
2148 interrupts with SS=0 in long mode). */
2149 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2150 {
2151 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2152 return iemRaiseTaskSwitchFault0(pVCpu);
2153 }
2154
2155 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2156 if ((NewSS & X86_SEL_RPL) != uCpl)
2157 {
2158 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2159 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2160 }
2161
2162 /*
2163 * Read the descriptor.
2164 */
2165 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2166 if (rcStrict != VINF_SUCCESS)
2167 return rcStrict;
2168
2169 /*
2170 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2171 */
2172 if (!pDesc->Legacy.Gen.u1DescType)
2173 {
2174 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2175 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2176 }
2177
2178 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2179 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2180 {
2181 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2182 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2183 }
2184 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2185 {
2186 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2187 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2188 }
2189
2190 /* Is it there? */
2191 /** @todo testcase: Is this checked before the canonical / limit check below? */
2192 if (!pDesc->Legacy.Gen.u1Present)
2193 {
2194 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2195 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2196 }
2197
2198 return VINF_SUCCESS;
2199}
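/*
 * Minimal usage sketch (hypothetical caller, not code from this file): a stack
 * switch to a new privilege level would typically fetch SS:ESP from the TSS and
 * then validate the selector before loading it:
 *
 *     IEMSELDESC DescSS;
 *     rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ... then load SS from DescSS and mark the descriptor accessed ...
 */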
2200
2201/** @} */
2202
2203
2204/** @name Raising Exceptions.
2205 *
2206 * @{
2207 */
2208
2209
2210/**
2211 * Loads the specified stack far pointer from the TSS.
2212 *
2213 * @returns VBox strict status code.
2214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2215 * @param uCpl The CPL to load the stack for.
2216 * @param pSelSS Where to return the new stack segment.
2217 * @param puEsp Where to return the new stack pointer.
2218 */
2219static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2220{
2221 VBOXSTRICTRC rcStrict;
2222 Assert(uCpl < 4);
2223
2224 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2225 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2226 {
2227 /*
2228 * 16-bit TSS (X86TSS16).
2229 */
2230 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2231 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2232 {
2233 uint32_t off = uCpl * 4 + 2;
2234 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2235 {
2236 /** @todo check actual access pattern here. */
2237 uint32_t u32Tmp = 0; /* gcc maybe... */
2238 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2239 if (rcStrict == VINF_SUCCESS)
2240 {
2241 *puEsp = RT_LOWORD(u32Tmp);
2242 *pSelSS = RT_HIWORD(u32Tmp);
2243 return VINF_SUCCESS;
2244 }
2245 }
2246 else
2247 {
2248 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2249 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2250 }
2251 break;
2252 }
2253
2254 /*
2255 * 32-bit TSS (X86TSS32).
2256 */
2257 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2258 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2259 {
2260 uint32_t off = uCpl * 8 + 4;
2261 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2262 {
2263/** @todo check actual access pattern here. */
2264 uint64_t u64Tmp;
2265 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2266 if (rcStrict == VINF_SUCCESS)
2267 {
2268 *puEsp = u64Tmp & UINT32_MAX;
2269 *pSelSS = (RTSEL)(u64Tmp >> 32);
2270 return VINF_SUCCESS;
2271 }
2272 }
2273 else
2274 {
2275 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2276 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2277 }
2278 break;
2279 }
2280
2281 default:
2282 AssertFailed();
2283 rcStrict = VERR_IEM_IPE_4;
2284 break;
2285 }
2286
2287 *puEsp = 0; /* make gcc happy */
2288 *pSelSS = 0; /* make gcc happy */
2289 return rcStrict;
2290}
2291
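/*
 * Worked examples of the offset calculations above: for uCpl=1 a 16-bit TSS is
 * read at offset 1*4 + 2 = 6 (SP1 followed by SS1), while a 32-bit TSS is read
 * at offset 1*8 + 4 = 12 (ESP1 followed by SS1).
 */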
2292
2293/**
2294 * Loads the specified stack pointer from the 64-bit TSS.
2295 *
2296 * @returns VBox strict status code.
2297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2298 * @param uCpl The CPL to load the stack for.
2299 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2300 * @param puRsp Where to return the new stack pointer.
2301 */
2302static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2303{
2304 Assert(uCpl < 4);
2305 Assert(uIst < 8);
2306 *puRsp = 0; /* make gcc happy */
2307
2308 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2309 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2310
2311 uint32_t off;
2312 if (uIst)
2313 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2314 else
2315 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2316 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2317 {
2318 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2319 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2320 }
2321
2322 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2323}
2324
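/*
 * Worked examples of the 64-bit TSS offsets above: uIst=0 with uCpl=2 reads
 * RSP2 at offset 2*8 + RT_UOFFSETOF(X86TSS64, rsp0) = 0x14, while uIst=3 reads
 * IST3 at offset 2*8 + RT_UOFFSETOF(X86TSS64, ist1) = 0x34.
 */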
2325
2326/**
2327 * Adjust the CPU state according to the exception being raised.
2328 *
2329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2330 * @param u8Vector The exception that has been raised.
2331 */
2332DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2333{
2334 switch (u8Vector)
2335 {
2336 case X86_XCPT_DB:
2337 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2338 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2339 break;
2340 /** @todo Read the AMD and Intel exception reference... */
2341 }
2342}
2343
2344
2345/**
2346 * Implements exceptions and interrupts for real mode.
2347 *
2348 * @returns VBox strict status code.
2349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2350 * @param cbInstr The number of bytes to offset rIP by in the return
2351 * address.
2352 * @param u8Vector The interrupt / exception vector number.
2353 * @param fFlags The flags.
2354 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2355 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2356 */
2357static VBOXSTRICTRC
2358iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2359 uint8_t cbInstr,
2360 uint8_t u8Vector,
2361 uint32_t fFlags,
2362 uint16_t uErr,
2363 uint64_t uCr2) RT_NOEXCEPT
2364{
2365 NOREF(uErr); NOREF(uCr2);
2366 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2367
2368 /*
2369 * Read the IDT entry.
2370 */
2371 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2372 {
2373 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2374 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2375 }
2376 RTFAR16 Idte;
2377 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2378 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2379 {
2380 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2381 return rcStrict;
2382 }
2383
2384#ifdef LOG_ENABLED
2385 /* If software interrupt, try to decode it if logging is enabled and such. */
2386 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2387 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2388 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2389#endif
2390
2391 /*
2392 * Push the stack frame.
2393 */
2394 uint8_t bUnmapInfo;
2395 uint16_t *pu16Frame;
2396 uint64_t uNewRsp;
2397 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2398 if (rcStrict != VINF_SUCCESS)
2399 return rcStrict;
2400
2401 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2402#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2403 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2404 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2405 fEfl |= UINT16_C(0xf000);
2406#endif
2407 pu16Frame[2] = (uint16_t)fEfl;
2408 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2409 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2410 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2411 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2412 return rcStrict;
2413
2414 /*
2415 * Load the vector address into cs:ip and make exception specific state
2416 * adjustments.
2417 */
2418 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2419 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2420 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2421 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2422 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2423 pVCpu->cpum.GstCtx.rip = Idte.off;
2424 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2425 IEMMISC_SET_EFL(pVCpu, fEfl);
2426
2427 /** @todo do we actually do this in real mode? */
2428 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2429 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2430
2431 /*
2432 * Deal with debug events that follow the exception and clear inhibit flags.
2433 */
2434 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2435 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2436 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2437 else
2438 {
2439 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2440 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2441 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2442 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2443 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2444 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2445 return iemRaiseDebugException(pVCpu);
2446 }
2447
2448 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2449 so best leave them alone in case we're in a weird kind of real mode... */
2450
2451 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2452}
2453
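/*
 * Illustration of the real-mode dispatch above: for vector 0x21 the IVT entry is
 * read at idtr.pIdt + 4*0x21 = idtr.pIdt + 0x84 (offset word followed by segment
 * word), a 6 byte frame holding FLAGS, CS and the return IP is pushed, and
 * execution continues at Idte.sel:Idte.off with IF, TF and AC cleared.
 */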
2454
2455/**
2456 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2457 *
2458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2459 * @param pSReg Pointer to the segment register.
2460 */
2461DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2462{
2463 pSReg->Sel = 0;
2464 pSReg->ValidSel = 0;
2465 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2466 {
2467 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2468 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2469 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2470 }
2471 else
2472 {
2473 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2474 /** @todo check this on AMD-V */
2475 pSReg->u64Base = 0;
2476 pSReg->u32Limit = 0;
2477 }
2478}
2479
2480
2481/**
2482 * Loads a segment selector during a task switch in V8086 mode.
2483 *
2484 * @param pSReg Pointer to the segment register.
2485 * @param uSel The selector value to load.
2486 */
2487DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2488{
2489 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2490 pSReg->Sel = uSel;
2491 pSReg->ValidSel = uSel;
2492 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2493 pSReg->u64Base = uSel << 4;
2494 pSReg->u32Limit = 0xffff;
2495 pSReg->Attr.u = 0xf3;
2496}
2497
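/*
 * Illustration of the V8086 selector load above: uSel=0x1234 yields a base of
 * 0x12340, a limit of 0xffff and attributes 0xf3 (present, DPL=3, accessed
 * read/write data), matching how segment state is derived in V86 mode.
 */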
2498
2499/**
2500 * Loads a segment selector during a task switch in protected mode.
2501 *
2502 * In this task switch scenario, we would throw \#TS exceptions rather than
2503 * \#GPs.
2504 *
2505 * @returns VBox strict status code.
2506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2507 * @param pSReg Pointer to the segment register.
2508 * @param uSel The new selector value.
2509 *
2510 * @remarks This does _not_ handle CS or SS.
2511 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2512 */
2513static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2514{
2515 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2516
2517 /* Null data selector. */
2518 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2519 {
2520 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2521 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2522 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2523 return VINF_SUCCESS;
2524 }
2525
2526 /* Fetch the descriptor. */
2527 IEMSELDESC Desc;
2528 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2529 if (rcStrict != VINF_SUCCESS)
2530 {
2531 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2532 VBOXSTRICTRC_VAL(rcStrict)));
2533 return rcStrict;
2534 }
2535
2536 /* Must be a data segment or readable code segment. */
2537 if ( !Desc.Legacy.Gen.u1DescType
2538 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2539 {
2540 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2541 Desc.Legacy.Gen.u4Type));
2542 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2543 }
2544
2545 /* Check privileges for data segments and non-conforming code segments. */
2546 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2547 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2548 {
2549 /* The RPL and the new CPL must be less than or equal to the DPL. */
2550 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2551 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2552 {
2553 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2554 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2555 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2556 }
2557 }
2558
2559 /* Is it there? */
2560 if (!Desc.Legacy.Gen.u1Present)
2561 {
2562 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2563 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2564 }
2565
2566 /* The base and limit. */
2567 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2568 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2569
2570 /*
2571 * Ok, everything checked out fine. Now set the accessed bit before
2572 * committing the result into the registers.
2573 */
2574 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2575 {
2576 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2577 if (rcStrict != VINF_SUCCESS)
2578 return rcStrict;
2579 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2580 }
2581
2582 /* Commit */
2583 pSReg->Sel = uSel;
2584 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2585 pSReg->u32Limit = cbLimit;
2586 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2587 pSReg->ValidSel = uSel;
2588 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2589 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2590 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2591
2592 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2593 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2594 return VINF_SUCCESS;
2595}
2596
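/*
 * Minimal usage sketch (hypothetical caller, not code from this file): after a
 * task switch has loaded the raw selector values, the hidden parts would be
 * refreshed one register at a time, e.g.:
 *
 *     rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ... repeated for DS, FS and GS ...
 */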
2597
2598/**
2599 * Performs a task switch.
2600 *
2601 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2602 * caller is responsible for performing the necessary checks (like DPL, TSS
2603 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2604 * reference for JMP, CALL, IRET.
2605 *
2606 * If the task switch is due to a software interrupt or hardware exception,
2607 * the caller is responsible for validating the TSS selector and descriptor. See
2608 * Intel Instruction reference for INT n.
2609 *
2610 * @returns VBox strict status code.
2611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2612 * @param enmTaskSwitch The cause of the task switch.
2613 * @param uNextEip The EIP effective after the task switch.
2614 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2615 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2616 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2617 * @param SelTss The TSS selector of the new task.
2618 * @param pNewDescTss Pointer to the new TSS descriptor.
2619 */
2620VBOXSTRICTRC
2621iemTaskSwitch(PVMCPUCC pVCpu,
2622 IEMTASKSWITCH enmTaskSwitch,
2623 uint32_t uNextEip,
2624 uint32_t fFlags,
2625 uint16_t uErr,
2626 uint64_t uCr2,
2627 RTSEL SelTss,
2628 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2629{
2630 Assert(!IEM_IS_REAL_MODE(pVCpu));
2631 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2632 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2633
2634 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2635 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2636 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2637 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2638 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2639
2640 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2641 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2642
2643 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2644 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2645
2646 /* Update CR2 in case it's a page-fault. */
2647 /** @todo This should probably be done much earlier in IEM/PGM. See
2648 * @bugref{5653#c49}. */
2649 if (fFlags & IEM_XCPT_FLAGS_CR2)
2650 pVCpu->cpum.GstCtx.cr2 = uCr2;
2651
2652 /*
2653 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2654 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2655 */
2656 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2657 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2658 if (uNewTssLimit < uNewTssLimitMin)
2659 {
2660 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2661 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2662 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2663 }
2664
2665 /*
2666 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2667 * The new TSS must have been read and validated (DPL, limits etc.) before a
2668 * task-switch VM-exit commences.
2669 *
2670 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2671 */
2672 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2673 {
2674 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2675 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2676 }
2677
2678 /*
2679 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2680 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2681 */
2682 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2683 {
2684 uint64_t const uExitInfo1 = SelTss;
2685 uint64_t uExitInfo2 = uErr;
2686 switch (enmTaskSwitch)
2687 {
2688 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2689 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2690 default: break;
2691 }
2692 if (fFlags & IEM_XCPT_FLAGS_ERR)
2693 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2694 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2695 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2696
2697 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2698 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2699 RT_NOREF2(uExitInfo1, uExitInfo2);
2700 }
2701
2702 /*
2703 * Check the current TSS limit. The last write to the current TSS during the
2704 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2705 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2706 *
2707 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2708 * end up with smaller than "legal" TSS limits.
2709 */
2710 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2711 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2712 if (uCurTssLimit < uCurTssLimitMin)
2713 {
2714 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2715 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2716 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2717 }
2718
2719 /*
2720 * Verify that the new TSS can be accessed and map it. Map only the required contents
2721 * and not the entire TSS.
2722 */
2723 uint8_t bUnmapInfoNewTss;
2724 void *pvNewTss;
2725 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2726 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2727 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2728 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2729 * not perform correct translation if this happens. See Intel spec. 7.2.1
2730 * "Task-State Segment". */
2731 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2732/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2733 * Consider wrapping the remainder into a function for simpler cleanup. */
2734 if (rcStrict != VINF_SUCCESS)
2735 {
2736 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2737 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2738 return rcStrict;
2739 }
2740
2741 /*
2742 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2743 */
2744 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2745 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2746 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2747 {
2748 uint8_t bUnmapInfoDescCurTss;
2749 PX86DESC pDescCurTss;
2750 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2751 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2752 if (rcStrict != VINF_SUCCESS)
2753 {
2754 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2755 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2756 return rcStrict;
2757 }
2758
2759 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2760 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2761 if (rcStrict != VINF_SUCCESS)
2762 {
2763 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2764 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2765 return rcStrict;
2766 }
2767
2768 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2769 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2770 {
2771 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2772 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2773 fEFlags &= ~X86_EFL_NT;
2774 }
2775 }
2776
2777 /*
2778 * Save the CPU state into the current TSS.
2779 */
2780 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2781 if (GCPtrNewTss == GCPtrCurTss)
2782 {
2783 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2784 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2785 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2786 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2787 pVCpu->cpum.GstCtx.ldtr.Sel));
2788 }
2789 if (fIsNewTss386)
2790 {
2791 /*
2792 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2793 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2794 */
2795 uint8_t bUnmapInfoCurTss32;
2796 void *pvCurTss32;
2797 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2798 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2799 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2800 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2801 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2802 if (rcStrict != VINF_SUCCESS)
2803 {
2804 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2805 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2806 return rcStrict;
2807 }
2808
2809 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..cbCurTss). */
2810 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2811 pCurTss32->eip = uNextEip;
2812 pCurTss32->eflags = fEFlags;
2813 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2814 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2815 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2816 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2817 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2818 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2819 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2820 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2821 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2822 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2823 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2824 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2825 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2826 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2827
2828 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2829 if (rcStrict != VINF_SUCCESS)
2830 {
2831 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2832 VBOXSTRICTRC_VAL(rcStrict)));
2833 return rcStrict;
2834 }
2835 }
2836 else
2837 {
2838 /*
2839 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2840 */
2841 uint8_t bUnmapInfoCurTss16;
2842 void *pvCurTss16;
2843 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2844 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2845 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2846 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2847 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2848 if (rcStrict != VINF_SUCCESS)
2849 {
2850 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2851 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2852 return rcStrict;
2853 }
2854
2855 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss..cbCurTss). */
2856 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2857 pCurTss16->ip = uNextEip;
2858 pCurTss16->flags = (uint16_t)fEFlags;
2859 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2860 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2861 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2862 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2863 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2864 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2865 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2866 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2867 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2868 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2869 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2870 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2871
2872 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2873 if (rcStrict != VINF_SUCCESS)
2874 {
2875 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2876 VBOXSTRICTRC_VAL(rcStrict)));
2877 return rcStrict;
2878 }
2879 }
2880
2881 /*
2882 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2883 */
2884 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2885 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2886 {
2887 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2888 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2889 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2890 }
2891
2892 /*
2893 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
2894 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2895 */
2896 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2897 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2898 bool fNewDebugTrap;
2899 if (fIsNewTss386)
2900 {
2901 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2902 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2903 uNewEip = pNewTss32->eip;
2904 uNewEflags = pNewTss32->eflags;
2905 uNewEax = pNewTss32->eax;
2906 uNewEcx = pNewTss32->ecx;
2907 uNewEdx = pNewTss32->edx;
2908 uNewEbx = pNewTss32->ebx;
2909 uNewEsp = pNewTss32->esp;
2910 uNewEbp = pNewTss32->ebp;
2911 uNewEsi = pNewTss32->esi;
2912 uNewEdi = pNewTss32->edi;
2913 uNewES = pNewTss32->es;
2914 uNewCS = pNewTss32->cs;
2915 uNewSS = pNewTss32->ss;
2916 uNewDS = pNewTss32->ds;
2917 uNewFS = pNewTss32->fs;
2918 uNewGS = pNewTss32->gs;
2919 uNewLdt = pNewTss32->selLdt;
2920 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2921 }
2922 else
2923 {
2924 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2925 uNewCr3 = 0;
2926 uNewEip = pNewTss16->ip;
2927 uNewEflags = pNewTss16->flags;
2928 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2929 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2930 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2931 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2932 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2933 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2934 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2935 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2936 uNewES = pNewTss16->es;
2937 uNewCS = pNewTss16->cs;
2938 uNewSS = pNewTss16->ss;
2939 uNewDS = pNewTss16->ds;
2940 uNewFS = 0;
2941 uNewGS = 0;
2942 uNewLdt = pNewTss16->selLdt;
2943 fNewDebugTrap = false;
2944 }
2945
2946 if (GCPtrNewTss == GCPtrCurTss)
2947 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2948 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2949
2950 /*
2951 * We're done accessing the new TSS.
2952 */
2953 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2954 if (rcStrict != VINF_SUCCESS)
2955 {
2956 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2957 return rcStrict;
2958 }
2959
2960 /*
2961 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2962 */
2963 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2964 {
2965 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2966 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2967 if (rcStrict != VINF_SUCCESS)
2968 {
2969 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2970 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2971 return rcStrict;
2972 }
2973
2974 /* Check that the descriptor indicates the new TSS is available (not busy). */
2975 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2976 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2977 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2978
2979 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2980 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2981 if (rcStrict != VINF_SUCCESS)
2982 {
2983 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2984 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2985 return rcStrict;
2986 }
2987 }
2988
2989 /*
2990 * From this point on, we're technically in the new task. We will defer exceptions
2991 * until the completion of the task switch but before executing any instructions in the new task.
2992 */
2993 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2994 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2995 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2996 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2997 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2998 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2999 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3000
3001 /* Set the busy bit in TR. */
3002 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3003
3004 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3005 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3006 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3007 {
3008 uNewEflags |= X86_EFL_NT;
3009 }
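 /* With NT set, a later IRET executed in the new task switches back to the outgoing task via the
    back-link (previous task link) field of the new TSS. */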
3010
3011 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3012 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
3013 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3014
3015 pVCpu->cpum.GstCtx.eip = uNewEip;
3016 pVCpu->cpum.GstCtx.eax = uNewEax;
3017 pVCpu->cpum.GstCtx.ecx = uNewEcx;
3018 pVCpu->cpum.GstCtx.edx = uNewEdx;
3019 pVCpu->cpum.GstCtx.ebx = uNewEbx;
3020 pVCpu->cpum.GstCtx.esp = uNewEsp;
3021 pVCpu->cpum.GstCtx.ebp = uNewEbp;
3022 pVCpu->cpum.GstCtx.esi = uNewEsi;
3023 pVCpu->cpum.GstCtx.edi = uNewEdi;
3024
3025 uNewEflags &= X86_EFL_LIVE_MASK;
3026 uNewEflags |= X86_EFL_RA1_MASK;
3027 IEMMISC_SET_EFL(pVCpu, uNewEflags);
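 /* X86_EFL_LIVE_MASK strips reserved bits from the flags image taken from the TSS and
    X86_EFL_RA1_MASK forces the always-one bit (bit 1), so stale or garbage bits in the saved
    flags cannot leak into the live EFLAGS. */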
3028
3029 /*
3030 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3031 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3032 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
3033 */
3034 pVCpu->cpum.GstCtx.es.Sel = uNewES;
3035 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
3036
3037 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3038 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
3039
3040 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3041 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
3042
3043 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
3044 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
3045
3046 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
3047 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
3048
3049 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
3050 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
3051 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
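 /* Only the visible selector values are loaded above; clearing the P attribute marks the hidden
    base/limit/attribute data as stale until the descriptors are fetched and validated further down. */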
3052
3053 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
3054 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3055 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
3056 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3057
3058 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3059 {
3060 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
3061 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
3062 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
3063 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
3064 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
3065 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
3066 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3067 }
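 /* The 'unusable' attribute is an Intel (VMX) concept for segment registers whose cached state must
    not be relied upon; AMD has no equivalent bit, hence the vendor check above. */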
3068
3069 /*
3070 * Switch CR3 for the new task.
3071 */
3072 if ( fIsNewTss386
3073 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
3074 {
3075 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3076 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3077 AssertRCSuccessReturn(rc, rc);
3078
3079 /* Inform PGM. */
3080 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
3081 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
3082 AssertRCReturn(rc, rc);
3083 /* ignore informational status codes */
3084
3085 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3086 }
3087
3088 /*
3089 * Switch LDTR for the new task.
3090 */
3091 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3092 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
3093 else
3094 {
3095 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3096
3097 IEMSELDESC DescNewLdt;
3098 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3099 if (rcStrict != VINF_SUCCESS)
3100 {
3101 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3102 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3103 return rcStrict;
3104 }
3105 if ( !DescNewLdt.Legacy.Gen.u1Present
3106 || DescNewLdt.Legacy.Gen.u1DescType
3107 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3108 {
3109 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3110 uNewLdt, DescNewLdt.Legacy.u));
3111 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3112 }
3113
3114 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
3115 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3116 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3117 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3118 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3119 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3120 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
3122 }
3123
3124 IEMSELDESC DescSS;
3125 if (IEM_IS_V86_MODE(pVCpu))
3126 {
3127 IEM_SET_CPL(pVCpu, 3);
3128 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
3129 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
3130 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
3131 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
3132 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
3133 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
3134
3135 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
3136 DescSS.Legacy.u = 0;
3137 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
3138 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
3139 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
3140 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
3141 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
3142 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3143 DescSS.Legacy.Gen.u2Dpl = 3;
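 /* The faked descriptor describes a writable DPL=3 data segment matching the V86 stack, so the
    error-code push checks further down operate on sane limit/base values. */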
3144 }
3145 else
3146 {
3147 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
3148
3149 /*
3150 * Load the stack segment for the new task.
3151 */
3152 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3153 {
3154 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3155 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3156 }
3157
3158 /* Fetch the descriptor. */
3159 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3160 if (rcStrict != VINF_SUCCESS)
3161 {
3162 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3163 VBOXSTRICTRC_VAL(rcStrict)));
3164 return rcStrict;
3165 }
3166
3167 /* SS must be a data segment and writable. */
3168 if ( !DescSS.Legacy.Gen.u1DescType
3169 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3170 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3171 {
3172 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3173 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3174 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3175 }
3176
3177 /* The SS.RPL, SS.DPL and CS.RPL (CPL) must all be equal. */
3178 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3179 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3180 {
3181 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3182 uNewCpl));
3183 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3184 }
3185
3186 /* Is it there? */
3187 if (!DescSS.Legacy.Gen.u1Present)
3188 {
3189 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3190 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3191 }
3192
3193 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3194 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3195
3196 /* Set the accessed bit before committing the result into SS. */
3197 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3198 {
3199 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3200 if (rcStrict != VINF_SUCCESS)
3201 return rcStrict;
3202 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3203 }
3204
3205 /* Commit SS. */
3206 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3207 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3208 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3209 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3210 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3211 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3213
3214 /* CPL has changed, update IEM before loading rest of segments. */
3215 IEM_SET_CPL(pVCpu, uNewCpl);
3216
3217 /*
3218 * Load the data segments for the new task.
3219 */
3220 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3221 if (rcStrict != VINF_SUCCESS)
3222 return rcStrict;
3223 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3224 if (rcStrict != VINF_SUCCESS)
3225 return rcStrict;
3226 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3227 if (rcStrict != VINF_SUCCESS)
3228 return rcStrict;
3229 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3230 if (rcStrict != VINF_SUCCESS)
3231 return rcStrict;
3232
3233 /*
3234 * Load the code segment for the new task.
3235 */
3236 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3237 {
3238 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3239 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3240 }
3241
3242 /* Fetch the descriptor. */
3243 IEMSELDESC DescCS;
3244 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3245 if (rcStrict != VINF_SUCCESS)
3246 {
3247 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3248 return rcStrict;
3249 }
3250
3251 /* CS must be a code segment. */
3252 if ( !DescCS.Legacy.Gen.u1DescType
3253 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3254 {
3255 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3256 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3257 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3258 }
3259
3260 /* For conforming CS, DPL must be less than or equal to the RPL. */
3261 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3262 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3263 {
3264 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3265 DescCS.Legacy.Gen.u2Dpl));
3266 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3267 }
3268
3269 /* For non-conforming CS, DPL must match RPL. */
3270 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3271 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3272 {
3273 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3274 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3275 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3276 }
3277
3278 /* Is it there? */
3279 if (!DescCS.Legacy.Gen.u1Present)
3280 {
3281 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3282 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3283 }
3284
3285 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3286 u64Base = X86DESC_BASE(&DescCS.Legacy);
3287
3288 /* Set the accessed bit before committing the result into CS. */
3289 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3290 {
3291 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3292 if (rcStrict != VINF_SUCCESS)
3293 return rcStrict;
3294 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3295 }
3296
3297 /* Commit CS. */
3298 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3299 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3300 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3301 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3302 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3303 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3305 }
3306
3307 /* Make sure the CPU mode is correct. */
3308 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3309 if (fExecNew != pVCpu->iem.s.fExec)
3310 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3311 pVCpu->iem.s.fExec = fExecNew;
3312
3313 /** @todo Debug trap. */
3314 if (fIsNewTss386 && fNewDebugTrap)
3315 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3316
3317 /*
3318 * Construct the error code masks based on what caused this task switch.
3319 * See Intel Instruction reference for INT.
3320 */
3321 uint16_t uExt;
3322 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3323 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3324 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3325 uExt = 1;
3326 else
3327 uExt = 0;
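 /* uExt becomes the EXT bit of any error code raised below: 1 for hardware-originated events
    (external interrupts, CPU exceptions, ICEBP), 0 for software INT n. */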
3328
3329 /*
3330 * Push any error code on to the new stack.
3331 */
3332 if (fFlags & IEM_XCPT_FLAGS_ERR)
3333 {
3334 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3335 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3336 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3337
3338 /* Check that there is sufficient space on the stack. */
3339 /** @todo Factor out segment limit checking for normal/expand down segments
3340 * into a separate function. */
3341 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3342 {
3343 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3344 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3345 {
3346 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3347 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3348 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3349 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3350 }
3351 }
3352 else
3353 {
3354 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3355 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3356 {
3357 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3358 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3359 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3360 }
3361 }
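 /* For expand-down segments the valid offsets lie above the limit, which is why the comparisons
    above are inverted relative to the normal (expand-up) case. */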
3362
3363
3364 if (fIsNewTss386)
3365 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3366 else
3367 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3368 if (rcStrict != VINF_SUCCESS)
3369 {
3370 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3371 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3372 return rcStrict;
3373 }
3374 }
3375
3376 /* Check the new EIP against the new CS limit. */
3377 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3378 {
3379 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3380 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3381 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3382 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3383 }
3384
3385 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3386 pVCpu->cpum.GstCtx.ss.Sel));
3387 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3388}
3389
3390
3391/**
3392 * Implements exceptions and interrupts for protected mode.
3393 *
3394 * @returns VBox strict status code.
3395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3396 * @param cbInstr The number of bytes to offset rIP by in the return
3397 * address.
3398 * @param u8Vector The interrupt / exception vector number.
3399 * @param fFlags The flags.
3400 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3401 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3402 */
3403static VBOXSTRICTRC
3404iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3405 uint8_t cbInstr,
3406 uint8_t u8Vector,
3407 uint32_t fFlags,
3408 uint16_t uErr,
3409 uint64_t uCr2) RT_NOEXCEPT
3410{
3411 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3412
3413 /*
3414 * Read the IDT entry.
3415 */
3416 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3417 {
3418 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3419 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3420 }
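 /* Error codes referencing the IDT have bit 1 (IDT) set, the vector number in the selector index
    field (bits 3..15), and bit 0 (EXT) set for externally caused events where applicable. */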
3421 X86DESC Idte;
3422 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3423 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3424 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3425 {
3426 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3427 return rcStrict;
3428 }
3429 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3430 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3431 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3432 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3433
3434 /*
3435 * Check the descriptor type, DPL and such.
3436 * ASSUMES this is done in the same order as described for call-gate calls.
3437 */
3438 if (Idte.Gate.u1DescType)
3439 {
3440 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3441 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3442 }
3443 bool fTaskGate = false;
3444 uint8_t f32BitGate = true;
3445 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3446 switch (Idte.Gate.u4Type)
3447 {
3448 case X86_SEL_TYPE_SYS_UNDEFINED:
3449 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3450 case X86_SEL_TYPE_SYS_LDT:
3451 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3452 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3453 case X86_SEL_TYPE_SYS_UNDEFINED2:
3454 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3455 case X86_SEL_TYPE_SYS_UNDEFINED3:
3456 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3457 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3458 case X86_SEL_TYPE_SYS_UNDEFINED4:
3459 {
3460 /** @todo check what actually happens when the type is wrong...
3461 * esp. call gates. */
3462 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3463 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3464 }
3465
3466 case X86_SEL_TYPE_SYS_286_INT_GATE:
3467 f32BitGate = false;
3468 RT_FALL_THRU();
3469 case X86_SEL_TYPE_SYS_386_INT_GATE:
3470 fEflToClear |= X86_EFL_IF;
3471 break;
3472
3473 case X86_SEL_TYPE_SYS_TASK_GATE:
3474 fTaskGate = true;
3475#ifndef IEM_IMPLEMENTS_TASKSWITCH
3476 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3477#endif
3478 break;
3479
3480 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3481 f32BitGate = false;
3482 break;
3483 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3484 break;
3485
3486 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3487 }
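 /* Summary: 286 gates use 16-bit offsets and 16-bit stack frames while 386 gates use 32 bits;
    interrupt gates additionally clear IF on entry, trap gates leave it set. */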
3488
3489 /* Check DPL against CPL if applicable. */
3490 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3491 {
3492 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3493 {
3494 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3495 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3496 }
3497 }
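 /* The gate DPL is only checked for software-generated interrupts (the SOFT_INT-without-ICEBP case
    above); hardware interrupts, CPU exceptions and ICEBP/INT1 skip this check. */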
3498
3499 /* Is it there? */
3500 if (!Idte.Gate.u1Present)
3501 {
3502 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3503 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3504 }
3505
3506 /* Is it a task-gate? */
3507 if (fTaskGate)
3508 {
3509 /*
3510 * Construct the error code masks based on what caused this task switch.
3511 * See Intel Instruction reference for INT.
3512 */
3513 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3514 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3515 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3516 RTSEL SelTss = Idte.Gate.u16Sel;
3517
3518 /*
3519 * Fetch the TSS descriptor in the GDT.
3520 */
3521 IEMSELDESC DescTSS;
3522 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3523 if (rcStrict != VINF_SUCCESS)
3524 {
3525 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3526 VBOXSTRICTRC_VAL(rcStrict)));
3527 return rcStrict;
3528 }
3529
3530 /* The TSS descriptor must be a system segment and be available (not busy). */
3531 if ( DescTSS.Legacy.Gen.u1DescType
3532 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3533 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3534 {
3535 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3536 u8Vector, SelTss, DescTSS.Legacy.au64));
3537 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3538 }
3539
3540 /* The TSS must be present. */
3541 if (!DescTSS.Legacy.Gen.u1Present)
3542 {
3543 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3544 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3545 }
3546
3547 /* Do the actual task switch. */
3548 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3549 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3550 fFlags, uErr, uCr2, SelTss, &DescTSS);
3551 }
3552
3553 /* A null CS is bad. */
3554 RTSEL NewCS = Idte.Gate.u16Sel;
3555 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3556 {
3557 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3558 return iemRaiseGeneralProtectionFault0(pVCpu);
3559 }
3560
3561 /* Fetch the descriptor for the new CS. */
3562 IEMSELDESC DescCS;
3563 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3564 if (rcStrict != VINF_SUCCESS)
3565 {
3566 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3567 return rcStrict;
3568 }
3569
3570 /* Must be a code segment. */
3571 if (!DescCS.Legacy.Gen.u1DescType)
3572 {
3573 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3574 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3575 }
3576 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3577 {
3578 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3579 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3580 }
3581
3582 /* Don't allow lowering the privilege level. */
3583 /** @todo Does the lowering of privileges apply to software interrupts
3584 * only? This has a bearing on the more-privileged or
3585 * same-privilege stack behavior further down. A testcase would
3586 * be nice. */
3587 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3588 {
3589 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3590 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3591 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3592 }
3593
3594 /* Make sure the selector is present. */
3595 if (!DescCS.Legacy.Gen.u1Present)
3596 {
3597 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3598 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3599 }
3600
3601#ifdef LOG_ENABLED
3602 /* If this is a software interrupt, try to decode it if logging is enabled and such. */
3603 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3604 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3605 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3606#endif
3607
3608 /* Check the new EIP against the new CS limit. */
3609 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3610 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3611 ? Idte.Gate.u16OffsetLow
3612 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3613 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3614 if (uNewEip > cbLimitCS)
3615 {
3616 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3617 u8Vector, uNewEip, cbLimitCS, NewCS));
3618 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3619 }
3620 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3621
3622 /* Calc the flag image to push. */
3623 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3624 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3625 fEfl &= ~X86_EFL_RF;
3626 else
3627 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3628
3629 /* When coming from V8086 mode, only a transition to CPL 0 is allowed. */
3630 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3631 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3632 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3633 {
3634 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3635 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3636 }
3637
3638 /*
3639 * If the privilege level changes, we need to get a new stack from the TSS.
3640 * This in turns means validating the new SS and ESP...
3641 */
3642 if (uNewCpl != IEM_GET_CPL(pVCpu))
3643 {
3644 RTSEL NewSS;
3645 uint32_t uNewEsp;
3646 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3647 if (rcStrict != VINF_SUCCESS)
3648 return rcStrict;
3649
3650 IEMSELDESC DescSS;
3651 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3652 if (rcStrict != VINF_SUCCESS)
3653 return rcStrict;
3654 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3655 if (!DescSS.Legacy.Gen.u1DefBig)
3656 {
3657 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3658 uNewEsp = (uint16_t)uNewEsp;
3659 }
3660
3661 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3662
3663 /* Check that there is sufficient space for the stack frame. */
3664 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3665 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3666 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3667 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
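 /* Frame layout (low to high): EIP, CS, EFLAGS, ESP, SS, plus the error code when present; when
    coming from V86 mode, ES, DS, FS and GS are pushed as well. Entries are 2 or 4 bytes wide
    depending on the gate size, which is what the arithmetic above encodes. */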
3668
3669 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3670 {
3671 if ( uNewEsp - 1 > cbLimitSS
3672 || uNewEsp < cbStackFrame)
3673 {
3674 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3675 u8Vector, NewSS, uNewEsp, cbStackFrame));
3676 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3677 }
3678 }
3679 else
3680 {
3681 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3682 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3683 {
3684 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3685 u8Vector, NewSS, uNewEsp, cbStackFrame));
3686 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3687 }
3688 }
3689
3690 /*
3691 * Start making changes.
3692 */
3693
3694 /* Set the new CPL so that stack accesses use it. */
3695 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3696 IEM_SET_CPL(pVCpu, uNewCpl);
3697
3698 /* Create the stack frame. */
3699 uint8_t bUnmapInfoStackFrame;
3700 RTPTRUNION uStackFrame;
3701 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3702 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3703 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3704 if (rcStrict != VINF_SUCCESS)
3705 return rcStrict;
3706 if (f32BitGate)
3707 {
3708 if (fFlags & IEM_XCPT_FLAGS_ERR)
3709 *uStackFrame.pu32++ = uErr;
3710 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3711 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3712 uStackFrame.pu32[2] = fEfl;
3713 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3714 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3715 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3716 if (fEfl & X86_EFL_VM)
3717 {
3718 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3719 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3720 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3721 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3722 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3723 }
3724 }
3725 else
3726 {
3727 if (fFlags & IEM_XCPT_FLAGS_ERR)
3728 *uStackFrame.pu16++ = uErr;
3729 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3730 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3731 uStackFrame.pu16[2] = fEfl;
3732 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3733 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3734 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3735 if (fEfl & X86_EFL_VM)
3736 {
3737 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3738 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3739 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3740 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3741 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3742 }
3743 }
3744 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3745 if (rcStrict != VINF_SUCCESS)
3746 return rcStrict;
3747
3748 /* Mark the selectors 'accessed' (hope this is the correct time). */
3749 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3750 * after pushing the stack frame? (Write protect the gdt + stack to
3751 * find out.) */
3752 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3753 {
3754 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3755 if (rcStrict != VINF_SUCCESS)
3756 return rcStrict;
3757 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3758 }
3759
3760 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3761 {
3762 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3763 if (rcStrict != VINF_SUCCESS)
3764 return rcStrict;
3765 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3766 }
3767
3768 /*
3769 * Start committing the register changes (joins with the DPL=CPL branch).
3770 */
3771 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3772 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3773 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3774 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3775 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3776 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3777 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3778 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3779 * SP is loaded).
3780 * Need to check the other combinations too:
3781 * - 16-bit TSS, 32-bit handler
3782 * - 32-bit TSS, 16-bit handler */
3783 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3784 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3785 else
3786 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3787
3788 if (fEfl & X86_EFL_VM)
3789 {
3790 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3791 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3792 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3793 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3794 }
3795 }
3796 /*
3797 * Same privilege, no stack change and smaller stack frame.
3798 */
3799 else
3800 {
3801 uint64_t uNewRsp;
3802 uint8_t bUnmapInfoStackFrame;
3803 RTPTRUNION uStackFrame;
3804 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3805 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3806 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3807 if (rcStrict != VINF_SUCCESS)
3808 return rcStrict;
3809
3810 if (f32BitGate)
3811 {
3812 if (fFlags & IEM_XCPT_FLAGS_ERR)
3813 *uStackFrame.pu32++ = uErr;
3814 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3815 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3816 uStackFrame.pu32[2] = fEfl;
3817 }
3818 else
3819 {
3820 if (fFlags & IEM_XCPT_FLAGS_ERR)
3821 *uStackFrame.pu16++ = uErr;
3822 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3823 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3824 uStackFrame.pu16[2] = fEfl;
3825 }
3826 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3827 if (rcStrict != VINF_SUCCESS)
3828 return rcStrict;
3829
3830 /* Mark the CS selector as 'accessed'. */
3831 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3832 {
3833 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3834 if (rcStrict != VINF_SUCCESS)
3835 return rcStrict;
3836 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3837 }
3838
3839 /*
3840 * Start committing the register changes (joins with the other branch).
3841 */
3842 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3843 }
3844
3845 /* ... register committing continues. */
3846 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3847 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3848 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3849 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3850 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3851 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3852
3853 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3854 fEfl &= ~fEflToClear;
3855 IEMMISC_SET_EFL(pVCpu, fEfl);
3856
3857 if (fFlags & IEM_XCPT_FLAGS_CR2)
3858 pVCpu->cpum.GstCtx.cr2 = uCr2;
3859
3860 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3861 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3862
3863 /* Make sure the execution flags are correct. */
3864 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3865 if (fExecNew != pVCpu->iem.s.fExec)
3866 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3867 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3868 pVCpu->iem.s.fExec = fExecNew;
3869 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3870
3871 /*
3872 * Deal with debug events that follow the exception, and clear the inhibit flags.
3873 */
3874 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3875 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
3876 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3877 else
3878 {
3879 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
3880 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
3881 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3882 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3883 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
3884 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3885 return iemRaiseDebugException(pVCpu);
3886 }
3887
3888 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3889}
3890
3891
3892/**
3893 * Implements exceptions and interrupts for long mode.
3894 *
3895 * @returns VBox strict status code.
3896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3897 * @param cbInstr The number of bytes to offset rIP by in the return
3898 * address.
3899 * @param u8Vector The interrupt / exception vector number.
3900 * @param fFlags The flags.
3901 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3902 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3903 */
3904static VBOXSTRICTRC
3905iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3906 uint8_t cbInstr,
3907 uint8_t u8Vector,
3908 uint32_t fFlags,
3909 uint16_t uErr,
3910 uint64_t uCr2) RT_NOEXCEPT
3911{
3912 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3913
3914 /*
3915 * Read the IDT entry.
3916 */
3917 uint16_t offIdt = (uint16_t)u8Vector << 4;
3918 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3919 {
3920 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3921 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3922 }
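 /* Long mode IDT entries are 16 bytes (hence the 'u8Vector << 4' above), so the descriptor is
    fetched as two 8-byte reads below. */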
3923 X86DESC64 Idte;
3924#ifdef _MSC_VER /* Shut up silly compiler warning. */
3925 Idte.au64[0] = 0;
3926 Idte.au64[1] = 0;
3927#endif
3928 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3929 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3930 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3931 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3932 {
3933 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3934 return rcStrict;
3935 }
3936 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3937 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3938 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3939
3940 /*
3941 * Check the descriptor type, DPL and such.
3942 * ASSUMES this is done in the same order as described for call-gate calls.
3943 */
3944 if (Idte.Gate.u1DescType)
3945 {
3946 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3947 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3948 }
3949 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3950 switch (Idte.Gate.u4Type)
3951 {
3952 case AMD64_SEL_TYPE_SYS_INT_GATE:
3953 fEflToClear |= X86_EFL_IF;
3954 break;
3955 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3956 break;
3957
3958 default:
3959 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3960 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3961 }
3962
3963 /* Check DPL against CPL if applicable. */
3964 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3965 {
3966 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3967 {
3968 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3969 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3970 }
3971 }
3972
3973 /* Is it there? */
3974 if (!Idte.Gate.u1Present)
3975 {
3976 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3977 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3978 }
3979
3980 /* A null CS is bad. */
3981 RTSEL NewCS = Idte.Gate.u16Sel;
3982 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3983 {
3984 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3985 return iemRaiseGeneralProtectionFault0(pVCpu);
3986 }
3987
3988 /* Fetch the descriptor for the new CS. */
3989 IEMSELDESC DescCS;
3990 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3991 if (rcStrict != VINF_SUCCESS)
3992 {
3993 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3994 return rcStrict;
3995 }
3996
3997 /* Must be a 64-bit code segment. */
3998 if (!DescCS.Long.Gen.u1DescType)
3999 {
4000 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4001 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4002 }
4003 if ( !DescCS.Long.Gen.u1Long
4004 || DescCS.Long.Gen.u1DefBig
4005 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4006 {
4007 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4008 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4009 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4010 }
4011
4012 /* Don't allow lowering the privilege level. For non-conforming CS
4013 selectors, the CS.DPL sets the privilege level the trap/interrupt
4014 handler runs at. For conforming CS selectors, the CPL remains
4015 unchanged, but the CS.DPL must be <= CPL. */
4016 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4017 * when CPU in Ring-0. Result \#GP? */
4018 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
4019 {
4020 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4021 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4022 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4023 }
4024
4025
4026 /* Make sure the selector is present. */
4027 if (!DescCS.Legacy.Gen.u1Present)
4028 {
4029 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4030 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4031 }
4032
4033 /* Check that the new RIP is canonical. */
4034 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4035 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4036 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4037 if (!IEM_IS_CANONICAL(uNewRip))
4038 {
4039 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4040 return iemRaiseGeneralProtectionFault0(pVCpu);
4041 }
4042
4043 /*
4044 * If the privilege level changes or if the IST isn't zero, we need to get
4045 * a new stack from the TSS.
4046 */
4047 uint64_t uNewRsp;
4048 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4049 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
4050 if ( uNewCpl != IEM_GET_CPL(pVCpu)
4051 || Idte.Gate.u3IST != 0)
4052 {
4053 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4054 if (rcStrict != VINF_SUCCESS)
4055 return rcStrict;
4056 }
4057 else
4058 uNewRsp = pVCpu->cpum.GstCtx.rsp;
4059 uNewRsp &= ~(uint64_t)0xf;
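 /* In 64-bit mode the CPU aligns the stack pointer down to a 16-byte boundary before pushing the
    interrupt stack frame, regardless of whether the stack was switched. */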
4060
4061 /*
4062 * Calc the flag image to push.
4063 */
4064 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4065 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4066 fEfl &= ~X86_EFL_RF;
4067 else
4068 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4069
4070 /*
4071 * Start making changes.
4072 */
4073 /* Set the new CPL so that stack accesses use it. */
4074 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
4075 IEM_SET_CPL(pVCpu, uNewCpl);
4076/** @todo Setting CPL this early seems wrong as it would affect any errors we
4077 * raise while accessing the stack and (?) the GDT/LDT... */
4078
4079 /* Create the stack frame. */
4080 uint8_t bUnmapInfoStackFrame;
4081 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4082 RTPTRUNION uStackFrame;
4083 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4084 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4085 if (rcStrict != VINF_SUCCESS)
4086 return rcStrict;
4087
4088 if (fFlags & IEM_XCPT_FLAGS_ERR)
4089 *uStackFrame.pu64++ = uErr;
4090 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4091 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4092 uStackFrame.pu64[2] = fEfl;
4093 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4094 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4095 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4096 if (rcStrict != VINF_SUCCESS)
4097 return rcStrict;
4098
4099 /* Mark the CS selector 'accessed' (hope this is the correct time). */
4100 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4101 * after pushing the stack frame? (Write protect the gdt + stack to
4102 * find out.) */
4103 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4104 {
4105 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4106 if (rcStrict != VINF_SUCCESS)
4107 return rcStrict;
4108 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4109 }
4110
4111 /*
4112 * Start committing the register changes.
4113 */
4114 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
4115 * hidden registers when interrupting 32-bit or 16-bit code! */
4116 if (uNewCpl != uOldCpl)
4117 {
4118 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4119 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4120 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4121 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4122 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4123 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4124 }
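 /* On a CPL change in long mode, SS is loaded with a null selector carrying the new RPL; a null SS
    is legal in 64-bit mode, and the frame pushed above preserves the old SS:RSP for IRET. */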
4125 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4126 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4127 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4128 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4129 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4130 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4131 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4132 pVCpu->cpum.GstCtx.rip = uNewRip;
4133
4134 fEfl &= ~fEflToClear;
4135 IEMMISC_SET_EFL(pVCpu, fEfl);
4136
4137 if (fFlags & IEM_XCPT_FLAGS_CR2)
4138 pVCpu->cpum.GstCtx.cr2 = uCr2;
4139
4140 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4141 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4142
4143 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4144
4145 /*
4146 * Deal with debug events that follow the exception, and clear the inhibit flags.
4147 */
4148 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4149 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4150 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4151 else
4152 {
4153 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4154 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4155 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4156 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4157 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4158 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4159 return iemRaiseDebugException(pVCpu);
4160 }
4161
4162 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4163}
4164
4165
4166/**
4167 * Implements exceptions and interrupts.
4168 *
4169 * All exceptions and interrupts go through this function!
4170 *
4171 * @returns VBox strict status code.
4172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4173 * @param cbInstr The number of bytes to offset rIP by in the return
4174 * address.
4175 * @param u8Vector The interrupt / exception vector number.
4176 * @param fFlags The flags.
4177 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4178 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4179 */
4180VBOXSTRICTRC
4181iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4182 uint8_t cbInstr,
4183 uint8_t u8Vector,
4184 uint32_t fFlags,
4185 uint16_t uErr,
4186 uint64_t uCr2) RT_NOEXCEPT
4187{
4188 /*
4189 * Get all the state that we might need here.
4190 */
4191 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4192 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4193
4194#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4195 /*
4196 * Flush prefetch buffer
4197 */
4198 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4199#endif
4200
4201 /*
4202 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4203 */
4204 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4205 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4206 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4207 | IEM_XCPT_FLAGS_BP_INSTR
4208 | IEM_XCPT_FLAGS_ICEBP_INSTR
4209 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4210 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4211 {
4212 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4213 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4214 u8Vector = X86_XCPT_GP;
4215 uErr = 0;
4216 }
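 /* In V8086 mode with IOPL < 3, INT n is IOPL sensitive and is converted to #GP(0) here instead of
    vectoring through the IDT; INT3, INTO and ICEBP are exempt, as the flag mask above shows. */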
4217
4218 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4219#ifdef DBGFTRACE_ENABLED
4220 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4221 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4222 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4223#endif
4224
4225 /*
4226 * Check if DBGF wants to intercept the exception.
4227 */
4228 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4229 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4230 { /* likely */ }
4231 else
4232 {
4233 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4234 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4235 if (rcStrict != VINF_SUCCESS)
4236 return rcStrict;
4237 }
4238
4239 /*
4240 * Evaluate whether NMI blocking should be in effect.
4241 * Normally, NMI blocking is in effect whenever we inject an NMI.
4242 */
4243 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4244 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4245
4246#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4247 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4248 {
4249 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4250 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4251 return rcStrict0;
4252
4253 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4254 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4255 {
4256 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4257 fBlockNmi = false;
4258 }
4259 }
4260#endif
4261
4262#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4263 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4264 {
4265 /*
4266 * If the event is being injected as part of VMRUN, it isn't subject to event
4267 * intercepts in the nested-guest. However, secondary exceptions that occur
4268 * during injection of any event -are- subject to exception intercepts.
4269 *
4270 * See AMD spec. 15.20 "Event Injection".
4271 */
4272 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4273 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4274 else
4275 {
4276 /*
4277 * Check and handle if the event being raised is intercepted.
4278 */
4279 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4280 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4281 return rcStrict0;
4282 }
4283 }
4284#endif
4285
4286 /*
4287 * Set NMI blocking if necessary.
4288 */
4289 if (fBlockNmi)
4290 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
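 /* NMI blocking stays in effect until the guest executes an IRET (or the blocking is cleared
    explicitly by the virtualization code). */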
4291
4292 /*
4293 * Do recursion accounting.
4294 */
4295 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4296 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4297 if (pVCpu->iem.s.cXcptRecursions == 0)
4298 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4299 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4300 else
4301 {
4302 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4303 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4304 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4305
4306 if (pVCpu->iem.s.cXcptRecursions >= 4)
4307 {
4308#ifdef DEBUG_bird
4309 AssertFailed();
4310#endif
4311 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4312 }
4313
4314 /*
4315 * Evaluate the sequence of recurring events.
4316 */
4317 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4318 NULL /* pXcptRaiseInfo */);
4319 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4320 { /* likely */ }
4321 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4322 {
4323 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4324 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4325 u8Vector = X86_XCPT_DF;
4326 uErr = 0;
4327#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4328 /* VMX nested-guest #DF intercept needs to be checked here. */
4329 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4330 {
4331 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4332 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4333 return rcStrict0;
4334 }
4335#endif
4336 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4337 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4338 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4339 }
4340 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4341 {
4342 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4343 return iemInitiateCpuShutdown(pVCpu);
4344 }
4345 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4346 {
4347 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4348 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4349 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4350 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4351 return VERR_EM_GUEST_CPU_HANG;
4352 }
4353 else
4354 {
4355 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4356 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4357 return VERR_IEM_IPE_9;
4358 }
4359
4360 /*
4361 * The 'EXT' bit is set when an exception occurs during delivery of an external
4362 * event (such as an interrupt or an earlier exception)[1]. The privileged software
4363 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
4364 * software interrupt instructions INT n, INT3 and INTO, the 'EXT' bit is not set[3].
4365 *
4366 * [1] - Intel spec. 6.13 "Error Code"
4367 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4368 * [3] - Intel Instruction reference for INT n.
4369 */
4370 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4371 && (fFlags & IEM_XCPT_FLAGS_ERR)
4372 && u8Vector != X86_XCPT_PF
4373 && u8Vector != X86_XCPT_DF)
4374 {
4375 uErr |= X86_TRAP_ERR_EXTERNAL;
4376 }
4377 }
4378
4379 pVCpu->iem.s.cXcptRecursions++;
4380 pVCpu->iem.s.uCurXcpt = u8Vector;
4381 pVCpu->iem.s.fCurXcpt = fFlags;
4382 pVCpu->iem.s.uCurXcptErr = uErr;
4383 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4384
4385 /*
4386 * Extensive logging.
4387 */
4388#if defined(LOG_ENABLED) && defined(IN_RING3)
4389 if (LogIs3Enabled())
4390 {
4391 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4392 char szRegs[4096];
4393 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4394 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4395 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4396 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4397 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4398 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4399 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4400 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4401 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4402 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4403 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4404 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4405 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4406 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4407 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4408 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4409 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4410 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4411 " efer=%016VR{efer}\n"
4412 " pat=%016VR{pat}\n"
4413 " sf_mask=%016VR{sf_mask}\n"
4414 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4415 " lstar=%016VR{lstar}\n"
4416 " star=%016VR{star} cstar=%016VR{cstar}\n"
4417 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4418 );
4419
4420 char szInstr[256];
4421 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4422 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4423 szInstr, sizeof(szInstr), NULL);
4424 Log3(("%s%s\n", szRegs, szInstr));
4425 }
4426#endif /* LOG_ENABLED && IN_RING3 */
4427
4428 /*
4429 * Stats.
4430 */
4431 uint64_t const uTimestamp = ASMReadTSC();
4432 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4433 {
4434 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4435 EMHistoryAddExit(pVCpu,
4436 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4437 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4438 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4439 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4440 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);
4441 }
4442 else
4443 {
4444 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4445 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4446 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4447 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4448 if (fFlags & IEM_XCPT_FLAGS_ERR)
4449 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4450 if (fFlags & IEM_XCPT_FLAGS_CR2)
4451 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4452 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);
4453 }
4454
4455 /*
4456     * Hack alert! Convert incoming debug events to silent ones on Intel.
4457 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4458 */
4459 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4460 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4461 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4462 { /* ignore */ }
4463 else
4464 {
4465 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4466 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4467 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4468 | CPUMCTX_DBG_HIT_DRX_SILENT;
4469 }
4470
4471 /*
4472     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4473 * to ensure that a stale TLB or paging cache entry will only cause one
4474 * spurious #PF.
4475 */
4476 if ( u8Vector == X86_XCPT_PF
4477 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4478 IEMTlbInvalidatePage(pVCpu, uCr2);
4479
4480 /*
4481 * Call the mode specific worker function.
4482 */
4483 VBOXSTRICTRC rcStrict;
4484 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4485 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4486 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4487 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4488 else
4489 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4490
4491 /* Flush the prefetch buffer. */
4492 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4493
4494 /*
4495 * Unwind.
4496 */
4497 pVCpu->iem.s.cXcptRecursions--;
4498 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4499 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4500 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4501 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4502 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4503 return rcStrict;
4504}
4505
4506#ifdef IEM_WITH_SETJMP
4507/**
4508 * See iemRaiseXcptOrInt. Will not return.
4509 */
4510DECL_NO_RETURN(void)
4511iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4512 uint8_t cbInstr,
4513 uint8_t u8Vector,
4514 uint32_t fFlags,
4515 uint16_t uErr,
4516 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4517{
4518 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4519 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4520}
4521#endif
4522
4523
4524/** \#DE - 00. */
4525VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4526{
4527 if (GCMIsInterceptingXcptDE(pVCpu))
4528 {
4529 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4530 if (rc == VINF_SUCCESS)
4531 {
4532 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4533            return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4534 }
4535 }
4536 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4537}
4538
4539
4540#ifdef IEM_WITH_SETJMP
4541/** \#DE - 00. */
4542DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4543{
4544 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4545}
4546#endif
4547
4548
4549/** \#DB - 01.
4550 * @note This automatically clears DR7.GD. */
4551VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4552{
4553 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4554 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4555 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4556}
4557
4558
4559/** \#BR - 05. */
4560VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4561{
4562 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4563}
4564
4565
4566/** \#UD - 06. */
4567VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4568{
4569 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4570}
4571
4572
4573#ifdef IEM_WITH_SETJMP
4574/** \#UD - 06. */
4575DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4576{
4577 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4578}
4579#endif
4580
4581
4582/** \#NM - 07. */
4583VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4584{
4585 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4586}
4587
4588
4589#ifdef IEM_WITH_SETJMP
4590/** \#NM - 07. */
4591DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4592{
4593 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4594}
4595#endif
4596
4597
4598/** \#TS(err) - 0a. */
4599VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4600{
4601 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4602}
4603
4604
4605/** \#TS(tr) - 0a. */
4606VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4607{
4608 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4609 pVCpu->cpum.GstCtx.tr.Sel, 0);
4610}
4611
4612
4613/** \#TS(0) - 0a. */
4614VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4615{
4616 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4617 0, 0);
4618}
4619
4620
4621/** \#TS(err) - 0a. */
4622VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4623{
4624 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4625 uSel & X86_SEL_MASK_OFF_RPL, 0);
4626}
4627
4628
4629/** \#NP(err) - 0b. */
4630VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4631{
4632 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4633}
4634
4635
4636/** \#NP(sel) - 0b. */
4637VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4638{
4639 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4640 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4641 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4642 uSel & ~X86_SEL_RPL, 0);
4643}
4644
4645
4646/** \#SS(seg) - 0c. */
4647VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4648{
4649 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4650 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4651 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4652 uSel & ~X86_SEL_RPL, 0);
4653}
4654
4655
4656/** \#SS(err) - 0c. */
4657VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4658{
4659 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4660 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4661 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4662}
4663
4664
4665/** \#GP(n) - 0d. */
4666VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4667{
4668 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4669 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4670}
4671
4672
4673/** \#GP(0) - 0d. */
4674VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4675{
4676 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4677 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4678}
4679
4680#ifdef IEM_WITH_SETJMP
4681/** \#GP(0) - 0d. */
4682DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4683{
4684 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4685 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4686}
4687#endif
4688
4689
4690/** \#GP(sel) - 0d. */
4691VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4692{
4693 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4694 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4696 Sel & ~X86_SEL_RPL, 0);
4697}
4698
4699
4700/** \#GP(0) - 0d. */
4701VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4702{
4703 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4704 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4705}
4706
4707
4708/** \#GP(sel) - 0d. */
4709VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4710{
4711 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4712 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4713 NOREF(iSegReg); NOREF(fAccess);
4714 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4715 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4716}
4717
4718#ifdef IEM_WITH_SETJMP
4719/** \#GP(sel) - 0d, longjmp. */
4720DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4721{
4722 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4723 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4724 NOREF(iSegReg); NOREF(fAccess);
4725 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4726 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4727}
4728#endif
4729
4730/** \#GP(sel) - 0d. */
4731VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4732{
4733 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4734 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4735 NOREF(Sel);
4736 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4737}
4738
4739#ifdef IEM_WITH_SETJMP
4740/** \#GP(sel) - 0d, longjmp. */
4741DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4742{
4743 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4744 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4745 NOREF(Sel);
4746 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4747}
4748#endif
4749
4750
4751/** \#GP(sel) - 0d. */
4752VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4753{
4754 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4755 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4756 NOREF(iSegReg); NOREF(fAccess);
4757 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4758}
4759
4760#ifdef IEM_WITH_SETJMP
4761/** \#GP(sel) - 0d, longjmp. */
4762DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4763{
4764 NOREF(iSegReg); NOREF(fAccess);
4765 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4766}
4767#endif
4768
4769
4770/** \#PF(n) - 0e. */
4771VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4772{
4773 uint16_t uErr;
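    /* The bits assembled below follow the architectural #PF error code layout:
         P    (bit 0) - the fault was a protection violation rather than a
                        not-present page,
         W/R  (bit 1) - the access was a write,
         U/S  (bit 2) - the access originated from user mode (CPL 3),
         RSVD (bit 3) - reserved bits were set in a paging-structure entry,
         I/D  (bit 4) - the fault was on an instruction fetch (NX/PAE paging). */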
4774 switch (rc)
4775 {
4776 case VERR_PAGE_NOT_PRESENT:
4777 case VERR_PAGE_TABLE_NOT_PRESENT:
4778 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4779 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4780 uErr = 0;
4781 break;
4782
4783 case VERR_RESERVED_PAGE_TABLE_BITS:
4784 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4785 break;
4786
4787 default:
4788 AssertMsgFailed(("%Rrc\n", rc));
4789 RT_FALL_THRU();
4790 case VERR_ACCESS_DENIED:
4791 uErr = X86_TRAP_PF_P;
4792 break;
4793 }
4794
4795 if (IEM_GET_CPL(pVCpu) == 3)
4796 uErr |= X86_TRAP_PF_US;
4797
4798 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4799 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4800 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4801 uErr |= X86_TRAP_PF_ID;
4802
4803#if 0 /* This is so much non-sense, really. Why was it done like that? */
4804 /* Note! RW access callers reporting a WRITE protection fault, will clear
4805 the READ flag before calling. So, read-modify-write accesses (RW)
4806 can safely be reported as READ faults. */
4807 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4808 uErr |= X86_TRAP_PF_RW;
4809#else
4810 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4811 {
4812 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4813 /// (regardless of outcome of the comparison in the latter case).
4814 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4815 uErr |= X86_TRAP_PF_RW;
4816 }
4817#endif
4818
4819 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4820 of the memory operand rather than at the start of it. (Not sure what
4821       happens if it crosses a page boundary.) The current heuristic for
4822       this is to report the #PF for the last byte if the access is more than
4823       64 bytes. This is probably not correct, but we can work that out later;
4824       the main objective now is to get FXSAVE to work like real hardware and
4825 make bs3-cpu-basic2 work. */
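    /* Example (under the heuristic above): a 512 byte FXSAVE at 0x1ff00 where the
       page at 0x20000 is not present will be reported with CR2 = 0x200ff rather
       than 0x20000, the first inaccessible byte. */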
4826 if (cbAccess <= 64)
4827    { /* likely */ }
4828 else
4829 GCPtrWhere += cbAccess - 1;
4830
4831 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4832 uErr, GCPtrWhere);
4833}
4834
4835#ifdef IEM_WITH_SETJMP
4836/** \#PF(n) - 0e, longjmp. */
4837DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4838 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4839{
4840 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4841}
4842#endif
4843
4844
4845/** \#MF(0) - 10. */
4846VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4847{
4848 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4849 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4850
4851 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
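    /* With CR0.NE clear the legacy reporting path is used: IRQ 13 is asserted via
       the ISA PIC and the instruction itself completes normally instead of
       raising #MF. */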
4852 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4853 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4854}
4855
4856#ifdef IEM_WITH_SETJMP
4857/** \#MF(0) - 10, longjmp. */
4858DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4859{
4860 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4861}
4862#endif
4863
4864
4865/** \#AC(0) - 11. */
4866VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4867{
4868 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4869}
4870
4871#ifdef IEM_WITH_SETJMP
4872/** \#AC(0) - 11, longjmp. */
4873DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4874{
4875 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4876}
4877#endif
4878
4879
4880/** \#XF(0)/\#XM(0) - 19. */
4881VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4882{
4883 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4884}
4885
4886
4887#ifdef IEM_WITH_SETJMP
4888/** \#XF(0)/\#XM(0) - 19, longjmp. */
4889DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4890{
4891 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4892}
4893#endif
4894
4895
4896/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4897IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4898{
4899 NOREF(cbInstr);
4900 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4901}
4902
4903
4904/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4905IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4906{
4907 NOREF(cbInstr);
4908 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4909}
4910
4911
4912/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4913IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4914{
4915 NOREF(cbInstr);
4916 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4917}
4918
4919
4920/** @} */
4921
4922/** @name Common opcode decoders.
4923 * @{
4924 */
4925//#include <iprt/mem.h>
4926
4927/**
4928 * Used to add extra details about a stub case.
4929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4930 */
4931void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4932{
4933#if defined(LOG_ENABLED) && defined(IN_RING3)
4934 PVM pVM = pVCpu->CTX_SUFF(pVM);
4935 char szRegs[4096];
4936 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4937 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4938 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4939 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4940 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4941 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4942 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4943 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4944 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4945 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4946 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4947 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4948 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4949 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4950 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4951 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4952 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4953 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4954 " efer=%016VR{efer}\n"
4955 " pat=%016VR{pat}\n"
4956 " sf_mask=%016VR{sf_mask}\n"
4957 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4958 " lstar=%016VR{lstar}\n"
4959 " star=%016VR{star} cstar=%016VR{cstar}\n"
4960 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4961 );
4962
4963 char szInstr[256];
4964 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4965 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4966 szInstr, sizeof(szInstr), NULL);
4967
4968 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4969#else
4970    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4971#endif
4972}
4973
4974/** @} */
4975
4976
4977
4978/** @name Register Access.
4979 * @{
4980 */
4981
4982/**
4983 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4984 *
4985 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4986 * segment limit.
4987 *
 * @returns Strict VBox status code.
4988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4989 * @param cbInstr Instruction size.
4990 * @param offNextInstr The offset of the next instruction.
4991 * @param enmEffOpSize Effective operand size.
4992 */
4993VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4994 IEMMODE enmEffOpSize) RT_NOEXCEPT
4995{
4996 switch (enmEffOpSize)
4997 {
4998 case IEMMODE_16BIT:
4999 {
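            /* Note: uNewIp is deliberately 16-bit, so the addition wraps within
               the segment just like IP does on real hardware before the limit
               check below. */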
5000 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
5001 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5002 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
5003 pVCpu->cpum.GstCtx.rip = uNewIp;
5004 else
5005 return iemRaiseGeneralProtectionFault0(pVCpu);
5006 break;
5007 }
5008
5009 case IEMMODE_32BIT:
5010 {
5011 Assert(!IEM_IS_64BIT_CODE(pVCpu));
5012 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
5013
5014 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
5015 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5016 pVCpu->cpum.GstCtx.rip = uNewEip;
5017 else
5018 return iemRaiseGeneralProtectionFault0(pVCpu);
5019 break;
5020 }
5021
5022 case IEMMODE_64BIT:
5023 {
5024 Assert(IEM_IS_64BIT_CODE(pVCpu));
5025
5026 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5027 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5028 pVCpu->cpum.GstCtx.rip = uNewRip;
5029 else
5030 return iemRaiseGeneralProtectionFault0(pVCpu);
5031 break;
5032 }
5033
5034 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5035 }
5036
5037#ifndef IEM_WITH_CODE_TLB
5038 /* Flush the prefetch buffer. */
5039 pVCpu->iem.s.cbOpcode = cbInstr;
5040#endif
5041
5042 /*
5043 * Clear RF and finish the instruction (maybe raise #DB).
5044 */
5045 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5046}
5047
5048
5049/**
5050 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5051 *
5052 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5053 * segment limit.
5054 *
5055 * @returns Strict VBox status code.
5056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5057 * @param cbInstr Instruction size.
5058 * @param offNextInstr The offset of the next instruction.
5059 */
5060VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
5061{
5062 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5063
5064 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
5065 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5066 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
5067 pVCpu->cpum.GstCtx.rip = uNewIp;
5068 else
5069 return iemRaiseGeneralProtectionFault0(pVCpu);
5070
5071#ifndef IEM_WITH_CODE_TLB
5072 /* Flush the prefetch buffer. */
5073 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5074#endif
5075
5076 /*
5077 * Clear RF and finish the instruction (maybe raise #DB).
5078 */
5079 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5080}
5081
5082
5083/**
5084 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5085 *
5086 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5087 * segment limit.
5088 *
5089 * @returns Strict VBox status code.
5090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5091 * @param cbInstr Instruction size.
5092 * @param offNextInstr The offset of the next instruction.
5093 * @param enmEffOpSize Effective operand size.
5094 */
5095VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5096 IEMMODE enmEffOpSize) RT_NOEXCEPT
5097{
5098 if (enmEffOpSize == IEMMODE_32BIT)
5099 {
5100 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
5101
5102 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
5103 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5104 pVCpu->cpum.GstCtx.rip = uNewEip;
5105 else
5106 return iemRaiseGeneralProtectionFault0(pVCpu);
5107 }
5108 else
5109 {
5110 Assert(enmEffOpSize == IEMMODE_64BIT);
5111
5112 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5113 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5114 pVCpu->cpum.GstCtx.rip = uNewRip;
5115 else
5116 return iemRaiseGeneralProtectionFault0(pVCpu);
5117 }
5118
5119#ifndef IEM_WITH_CODE_TLB
5120 /* Flush the prefetch buffer. */
5121 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5122#endif
5123
5124 /*
5125 * Clear RF and finish the instruction (maybe raise #DB).
5126 */
5127 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5128}
5129
5130/** @} */
5131
5132
5133/** @name FPU access and helpers.
5134 *
5135 * @{
5136 */
5137
5138/**
5139 * Updates the x87.DS and FPUDP registers.
5140 *
5141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5142 * @param pFpuCtx The FPU context.
5143 * @param iEffSeg The effective segment register.
5144 * @param GCPtrEff The effective address relative to @a iEffSeg.
5145 */
5146DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5147{
5148 RTSEL sel;
5149 switch (iEffSeg)
5150 {
5151 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
5152 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
5153 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
5154 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
5155 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
5156 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
5157 default:
5158 AssertMsgFailed(("%d\n", iEffSeg));
5159 sel = pVCpu->cpum.GstCtx.ds.Sel;
5160 }
5161    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5162 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5163 {
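        /* In real and V86 mode the data pointer is stored as a 32-bit linear
           address (selector * 16 + offset) and the DS selector field is zeroed. */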
5164 pFpuCtx->DS = 0;
5165 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5166 }
5167 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5168 {
5169 pFpuCtx->DS = sel;
5170 pFpuCtx->FPUDP = GCPtrEff;
5171 }
5172 else
5173 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5174}
5175
5176
5177/**
5178 * Rotates the stack registers in the push direction.
5179 *
5180 * @param pFpuCtx The FPU context.
5181 * @remarks This is a complete waste of time, but fxsave stores the registers in
5182 * stack order.
5183 */
5184DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5185{
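    /* aRegs[] is indexed relative to TOP (aRegs[0] == ST(0)). Callers store the
       value being pushed in aRegs[7], the register that becomes the new ST(0);
       this rotation restores the aRegs[i] == ST(i) mapping for the decremented TOP. */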
5186 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5187 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5188 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5189 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5190 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5191 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5192 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5193 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5194 pFpuCtx->aRegs[0].r80 = r80Tmp;
5195}
5196
5197
5198/**
5199 * Rotates the stack registers in the pop direction.
5200 *
5201 * @param pFpuCtx The FPU context.
5202 * @remarks This is a complete waste of time, but fxsave stores the registers in
5203 * stack order.
5204 */
5205DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5206{
5207 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5208 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5209 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5210 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5211 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5212 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5213 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5214 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5215 pFpuCtx->aRegs[7].r80 = r80Tmp;
5216}
5217
5218
5219/**
5220 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5221 * exception prevents it.
5222 *
5223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5224 * @param pResult The FPU operation result to push.
5225 * @param pFpuCtx The FPU context.
5226 */
5227static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5228{
5229 /* Update FSW and bail if there are pending exceptions afterwards. */
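    /* Note: IE/DE/ZE in the FSW and IM/DM/ZM in the FCW occupy the same bit
       positions, which is what makes the AND-NOT test below work. */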
5230 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5231 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5232 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5233 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5234 {
5235        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5236 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5237 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5238 pFpuCtx->FSW = fFsw;
5239 return;
5240 }
5241
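    /* Adding 7 to the 3-bit TOP value is a decrement modulo 8, i.e. iNewTop is
       the slot the pushed value will occupy. */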
5242 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5243 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5244 {
5245 /* All is fine, push the actual value. */
5246 pFpuCtx->FTW |= RT_BIT(iNewTop);
5247 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5248 }
5249 else if (pFpuCtx->FCW & X86_FCW_IM)
5250 {
5251 /* Masked stack overflow, push QNaN. */
5252 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5253 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5254 }
5255 else
5256 {
5257 /* Raise stack overflow, don't push anything. */
5258 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5259 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5260 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5261 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5262 return;
5263 }
5264
5265 fFsw &= ~X86_FSW_TOP_MASK;
5266 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5267 pFpuCtx->FSW = fFsw;
5268
5269 iemFpuRotateStackPush(pFpuCtx);
5270 RT_NOREF(pVCpu);
5271}
5272
5273
5274/**
5275 * Stores a result in a FPU register and updates the FSW and FTW.
5276 *
5277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5278 * @param pFpuCtx The FPU context.
5279 * @param pResult The result to store.
5280 * @param iStReg Which FPU register to store it in.
5281 */
5282static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5283{
5284 Assert(iStReg < 8);
5285 uint16_t fNewFsw = pFpuCtx->FSW;
5286 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
5287 fNewFsw &= ~X86_FSW_C_MASK;
5288 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5289 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5290 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5291 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5292 pFpuCtx->FSW = fNewFsw;
5293 pFpuCtx->FTW |= RT_BIT(iReg);
5294 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5295 RT_NOREF(pVCpu);
5296}
5297
5298
5299/**
5300 * Only updates the FPU status word (FSW) with the result of the current
5301 * instruction.
5302 *
5303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5304 * @param pFpuCtx The FPU context.
5305 * @param u16FSW The FSW output of the current instruction.
5306 */
5307static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5308{
5309 uint16_t fNewFsw = pFpuCtx->FSW;
5310 fNewFsw &= ~X86_FSW_C_MASK;
5311 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5312 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5313        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5314 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5315 pFpuCtx->FSW = fNewFsw;
5316 RT_NOREF(pVCpu);
5317}
5318
5319
5320/**
5321 * Pops one item off the FPU stack if no pending exception prevents it.
5322 *
5323 * @param pFpuCtx The FPU context.
5324 */
5325static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5326{
5327 /* Check pending exceptions. */
5328 uint16_t uFSW = pFpuCtx->FSW;
5329 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5330 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5331 return;
5332
5333    /* Pop the stack: TOP++ (the 3-bit TOP field wraps modulo 8). */
5334 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5335 uFSW &= ~X86_FSW_TOP_MASK;
5336 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5337 pFpuCtx->FSW = uFSW;
5338
5339 /* Mark the previous ST0 as empty. */
5340 iOldTop >>= X86_FSW_TOP_SHIFT;
5341 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5342
5343 /* Rotate the registers. */
5344 iemFpuRotateStackPop(pFpuCtx);
5345}
5346
5347
5348/**
5349 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5350 *
5351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5352 * @param pResult The FPU operation result to push.
5353 * @param uFpuOpcode The FPU opcode value.
5354 */
5355void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5356{
5357 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5358 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5359 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5360}
5361
5362
5363/**
5364 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5365 * and sets FPUDP and FPUDS.
5366 *
5367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5368 * @param pResult The FPU operation result to push.
5369 * @param iEffSeg The effective segment register.
5370 * @param GCPtrEff The effective address relative to @a iEffSeg.
5371 * @param uFpuOpcode The FPU opcode value.
5372 */
5373void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5374 uint16_t uFpuOpcode) RT_NOEXCEPT
5375{
5376 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5377 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5378 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5379 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5380}
5381
5382
5383/**
5384 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
5385 * unless a pending exception prevents it.
5386 *
5387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5388 * @param pResult The FPU operation result to store and push.
5389 * @param uFpuOpcode The FPU opcode value.
5390 */
5391void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5392{
5393 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5394 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5395
5396 /* Update FSW and bail if there are pending exceptions afterwards. */
5397 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5398 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5399 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5400 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5401 {
5402 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5403 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5404 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5405 pFpuCtx->FSW = fFsw;
5406 return;
5407 }
5408
5409 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5410 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5411 {
5412 /* All is fine, push the actual value. */
5413 pFpuCtx->FTW |= RT_BIT(iNewTop);
5414 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5415 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5416 }
5417 else if (pFpuCtx->FCW & X86_FCW_IM)
5418 {
5419 /* Masked stack overflow, push QNaN. */
5420 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5421 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5422 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5423 }
5424 else
5425 {
5426 /* Raise stack overflow, don't push anything. */
5427 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5428 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5429 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5430 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5431 return;
5432 }
5433
5434 fFsw &= ~X86_FSW_TOP_MASK;
5435 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5436 pFpuCtx->FSW = fFsw;
5437
5438 iemFpuRotateStackPush(pFpuCtx);
5439}
5440
5441
5442/**
5443 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5444 * FOP.
5445 *
5446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5447 * @param pResult The result to store.
5448 * @param iStReg Which FPU register to store it in.
5449 * @param uFpuOpcode The FPU opcode value.
5450 */
5451void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5452{
5453 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5454 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5455 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5456}
5457
5458
5459/**
5460 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5461 * FOP, and then pops the stack.
5462 *
5463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5464 * @param pResult The result to store.
5465 * @param iStReg Which FPU register to store it in.
5466 * @param uFpuOpcode The FPU opcode value.
5467 */
5468void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5469{
5470 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5471 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5472 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5473 iemFpuMaybePopOne(pFpuCtx);
5474}
5475
5476
5477/**
5478 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5479 * FPUDP, and FPUDS.
5480 *
5481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5482 * @param pResult The result to store.
5483 * @param iStReg Which FPU register to store it in.
5484 * @param iEffSeg The effective memory operand selector register.
5485 * @param GCPtrEff The effective memory operand offset.
5486 * @param uFpuOpcode The FPU opcode value.
5487 */
5488void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5489 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5490{
5491 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5492 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5493 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5494 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5495}
5496
5497
5498/**
5499 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5500 * FPUDP, and FPUDS, and then pops the stack.
5501 *
5502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5503 * @param pResult The result to store.
5504 * @param iStReg Which FPU register to store it in.
5505 * @param iEffSeg The effective memory operand selector register.
5506 * @param GCPtrEff The effective memory operand offset.
5507 * @param uFpuOpcode The FPU opcode value.
5508 */
5509void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5510 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5511{
5512 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5513 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5514 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5515 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5516 iemFpuMaybePopOne(pFpuCtx);
5517}
5518
5519
5520/**
5521 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5522 *
5523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5524 * @param uFpuOpcode The FPU opcode value.
5525 */
5526void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5527{
5528 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5529 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5530}
5531
5532
5533/**
5534 * Updates the FSW, FOP, FPUIP, and FPUCS.
5535 *
5536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5537 * @param u16FSW The FSW from the current instruction.
5538 * @param uFpuOpcode The FPU opcode value.
5539 */
5540void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5541{
5542 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5543 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5544 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5545}
5546
5547
5548/**
5549 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5550 *
5551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5552 * @param u16FSW The FSW from the current instruction.
5553 * @param uFpuOpcode The FPU opcode value.
5554 */
5555void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5556{
5557 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5558 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5559 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5560 iemFpuMaybePopOne(pFpuCtx);
5561}
5562
5563
5564/**
5565 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5566 *
5567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5568 * @param u16FSW The FSW from the current instruction.
5569 * @param iEffSeg The effective memory operand selector register.
5570 * @param GCPtrEff The effective memory operand offset.
5571 * @param uFpuOpcode The FPU opcode value.
5572 */
5573void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5574{
5575 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5576 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5577 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5578 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5579}
5580
5581
5582/**
5583 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5584 *
5585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5586 * @param u16FSW The FSW from the current instruction.
5587 * @param uFpuOpcode The FPU opcode value.
5588 */
5589void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5590{
5591 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5592 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5593 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5594 iemFpuMaybePopOne(pFpuCtx);
5595 iemFpuMaybePopOne(pFpuCtx);
5596}
5597
5598
5599/**
5600 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5601 *
5602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5603 * @param u16FSW The FSW from the current instruction.
5604 * @param iEffSeg The effective memory operand selector register.
5605 * @param GCPtrEff The effective memory operand offset.
5606 * @param uFpuOpcode The FPU opcode value.
5607 */
5608void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5609 uint16_t uFpuOpcode) RT_NOEXCEPT
5610{
5611 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5612 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5613 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5614 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5615 iemFpuMaybePopOne(pFpuCtx);
5616}
5617
5618
5619/**
5620 * Worker routine for raising an FPU stack underflow exception.
5621 *
5622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5623 * @param pFpuCtx The FPU context.
5624 * @param iStReg The stack register being accessed.
5625 */
5626static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5627{
5628 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5629 if (pFpuCtx->FCW & X86_FCW_IM)
5630 {
5631 /* Masked underflow. */
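        /* Masked response: set IE and SF (C1 stays clear, indicating underflow)
           and, if a destination register was given, tag it as used and load a
           QNaN into it. */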
5632 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5633 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5634 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5635 if (iStReg != UINT8_MAX)
5636 {
5637 pFpuCtx->FTW |= RT_BIT(iReg);
5638 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5639 }
5640 }
5641 else
5642 {
5643 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5644 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5645 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5646 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5647 }
5648 RT_NOREF(pVCpu);
5649}
5650
5651
5652/**
5653 * Raises a FPU stack underflow exception.
5654 *
5655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5656 * @param iStReg The destination register that should be loaded
5657 * with QNaN if \#IS is not masked. Specify
5658 * UINT8_MAX if none (like for fcom).
5659 * @param uFpuOpcode The FPU opcode value.
5660 */
5661void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5662{
5663 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5664 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5665 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5666}
5667
5668
5669void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5670{
5671 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5672 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5673 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5674 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5675}
5676
5677
5678void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5679{
5680 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5681 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5682 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5683 iemFpuMaybePopOne(pFpuCtx);
5684}
5685
5686
5687void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5688 uint16_t uFpuOpcode) RT_NOEXCEPT
5689{
5690 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5691 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5692 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5693 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5694 iemFpuMaybePopOne(pFpuCtx);
5695}
5696
5697
5698void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5699{
5700 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5701 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5702 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5703 iemFpuMaybePopOne(pFpuCtx);
5704 iemFpuMaybePopOne(pFpuCtx);
5705}
5706
5707
5708void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5709{
5710 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5711 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5712
5713 if (pFpuCtx->FCW & X86_FCW_IM)
5714 {
5715        /* Masked underflow - Push QNaN. */
5716 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5717 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5718 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5719 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5720 pFpuCtx->FTW |= RT_BIT(iNewTop);
5721 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5722 iemFpuRotateStackPush(pFpuCtx);
5723 }
5724 else
5725 {
5726 /* Exception pending - don't change TOP or the register stack. */
5727 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5728 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5729 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5730 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5731 }
5732}
5733
5734
5735void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5736{
5737 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5738 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5739
5740 if (pFpuCtx->FCW & X86_FCW_IM)
5741 {
5742        /* Masked underflow - Push QNaN. */
5743 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5744 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5745 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5746 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5747 pFpuCtx->FTW |= RT_BIT(iNewTop);
5748 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5749 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5750 iemFpuRotateStackPush(pFpuCtx);
5751 }
5752 else
5753 {
5754 /* Exception pending - don't change TOP or the register stack. */
5755 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5756 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5757 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5758 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5759 }
5760}
5761
5762
5763/**
5764 * Worker routine for raising an FPU stack overflow exception on a push.
5765 *
5766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5767 * @param pFpuCtx The FPU context.
5768 */
5769static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5770{
5771 if (pFpuCtx->FCW & X86_FCW_IM)
5772 {
5773 /* Masked overflow. */
5774 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5775 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5776 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5777 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5778 pFpuCtx->FTW |= RT_BIT(iNewTop);
5779 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5780 iemFpuRotateStackPush(pFpuCtx);
5781 }
5782 else
5783 {
5784 /* Exception pending - don't change TOP or the register stack. */
5785 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5786 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5787 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5788 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5789 }
5790 RT_NOREF(pVCpu);
5791}
5792
5793
5794/**
5795 * Raises a FPU stack overflow exception on a push.
5796 *
5797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5798 * @param uFpuOpcode The FPU opcode value.
5799 */
5800void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5801{
5802 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5803 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5804 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5805}
5806
5807
5808/**
5809 * Raises a FPU stack overflow exception on a push with a memory operand.
5810 *
5811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5812 * @param iEffSeg The effective memory operand selector register.
5813 * @param GCPtrEff The effective memory operand offset.
5814 * @param uFpuOpcode The FPU opcode value.
5815 */
5816void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5817{
5818 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5819 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5820 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5821 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5822}
5823
5824/** @} */
5825
5826
5827/** @name Memory access.
5828 *
5829 * @{
5830 */
5831
5832#undef LOG_GROUP
5833#define LOG_GROUP LOG_GROUP_IEM_MEM
5834
5835/**
5836 * Updates the IEMCPU::cbWritten counter if applicable.
5837 *
5838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5839 * @param fAccess The access being accounted for.
5840 * @param cbMem The access size.
5841 */
5842DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5843{
5844 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5845 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5846 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5847}
5848
5849
5850/**
5851 * Applies the segment limit, base and attributes.
5852 *
5853 * This may raise a \#GP or \#SS.
5854 *
5855 * @returns VBox strict status code.
5856 *
5857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5858 * @param fAccess The kind of access which is being performed.
5859 * @param iSegReg The index of the segment register to apply.
5860 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5861 * TSS, ++).
5862 * @param cbMem The access size.
5863 * @param pGCPtrMem Pointer to the guest memory address to apply
5864 * segmentation to. Input and output parameter.
5865 */
5866VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5867{
5868 if (iSegReg == UINT8_MAX)
5869 return VINF_SUCCESS;
5870
5871 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5872 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5873 switch (IEM_GET_CPU_MODE(pVCpu))
5874 {
5875 case IEMMODE_16BIT:
5876 case IEMMODE_32BIT:
5877 {
5878 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5879 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5880
5881 if ( pSel->Attr.n.u1Present
5882 && !pSel->Attr.n.u1Unusable)
5883 {
5884 Assert(pSel->Attr.n.u1DescType);
5885 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5886 {
5887 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5888 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5889 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5890
5891 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5892 {
5893 /** @todo CPL check. */
5894 }
5895
5896 /*
5897 * There are two kinds of data selectors, normal and expand down.
5898 */
5899 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5900 {
5901 if ( GCPtrFirst32 > pSel->u32Limit
5902 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5903 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5904 }
5905 else
5906 {
5907 /*
5908 * The upper boundary is defined by the B bit, not the G bit!
5909 */
5910 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5911 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5912 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5913 }
5914 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5915 }
5916 else
5917 {
5918 /*
5919 * Code selectors can usually be used to read through; writing is
5920 * only permitted in real and V8086 mode.
5921 */
5922 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5923 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5924 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5925 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5926 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5927
5928 if ( GCPtrFirst32 > pSel->u32Limit
5929 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5930 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5931
5932 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5933 {
5934 /** @todo CPL check. */
5935 }
5936
5937 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5938 }
5939 }
5940 else
5941 return iemRaiseGeneralProtectionFault0(pVCpu);
5942 return VINF_SUCCESS;
5943 }
5944
5945 case IEMMODE_64BIT:
5946 {
5947 RTGCPTR GCPtrMem = *pGCPtrMem;
5948 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5949 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5950
5951 Assert(cbMem >= 1);
5952 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5953 return VINF_SUCCESS;
5954 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5955 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5956 return iemRaiseGeneralProtectionFault0(pVCpu);
5957 }
5958
5959 default:
5960 AssertFailedReturn(VERR_IEM_IPE_7);
5961 }
5962}
5963
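/*
 * Illustrative sketch (not compiled): the limit checks above for 16/32-bit
 * data segments, reduced to a standalone helper with hypothetical names and
 * without the present/usable and access-rights handling, purely to show the
 * normal vs. expand-down range logic.
 */
#if 0
static bool exampleIsWithinDataSegLimit(uint32_t uFirst, uint32_t uLast, uint32_t uLimit,
                                        bool fExpandDown, bool fDefBig)
{
    if (!fExpandDown)
        return uFirst <= uLimit && uLast <= uLimit;            /* valid range: [0, limit] */
    uint32_t const uUpper = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return uFirst > uLimit && uLast <= uUpper;                 /* valid range: (limit, upper] */
}
#endif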
5964
5965/**
5966 * Translates a virtual address to a physical address and checks if we
5967 * can access the page as specified.
5968 *
5969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5970 * @param GCPtrMem The virtual address.
5971 * @param cbAccess The access size, for raising \#PF correctly for
5972 * FXSAVE and such.
5973 * @param fAccess The intended access.
5974 * @param pGCPhysMem Where to return the physical address.
5975 */
5976VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5977 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5978{
5979 /** @todo Need a different PGM interface here. We're currently using
5980 * generic / REM interfaces. this won't cut it for R0. */
5981 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5982 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5983 * here. */
5984 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5985 PGMPTWALKFAST WalkFast;
5986 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
5987 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
5988 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
5989 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
5990 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
5991 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
5992 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5993 fQPage |= PGMQPAGE_F_USER_MODE;
5994 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
5995 if (RT_SUCCESS(rc))
5996 {
5997 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
5998
5999 /* If the page is writable and does not have the no-exec bit set, all
6000 access is allowed. Otherwise we'll have to check more carefully... */
6001 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
6002 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
6003 || (WalkFast.fEffective & X86_PTE_RW)
6004 || ( ( IEM_GET_CPL(pVCpu) != 3
6005 || (fAccess & IEM_ACCESS_WHAT_SYS))
6006 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
6007 && ( (WalkFast.fEffective & X86_PTE_US)
6008 || IEM_GET_CPL(pVCpu) != 3
6009 || (fAccess & IEM_ACCESS_WHAT_SYS) )
6010 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
6011 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
6012 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
6013 )
6014 );
6015
6016 /* PGMGstQueryPageFast sets the A & D bits. */
6017 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6018 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
6019
6020 *pGCPhysMem = WalkFast.GCPhys;
6021 return VINF_SUCCESS;
6022 }
6023
6024 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6025 /** @todo Check unassigned memory in unpaged mode. */
6026#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6027 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6028 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6029#endif
6030 *pGCPhysMem = NIL_RTGCPHYS;
6031 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
6032}
6033
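/*
 * A note on the fQPage construction above: the AssertCompiles let the
 * IEM_ACCESS_TYPE_* bits pass straight through as PGMQPAGE_F_* bits, and the
 * XOR term sets PGMQPAGE_F_CR0_WP0 exactly when CR0.WP is clear:
 *      cr0 & X86_CR0_WP    -> X86_CR0_WP when WP=1, 0 when WP=0
 *      (...) ^ X86_CR0_WP  -> 0 when WP=1, X86_CR0_WP (== PGMQPAGE_F_CR0_WP0) when WP=0
 * so supervisor writes to read-only pages are still queried as permissible
 * when WP=0, matching the architectural behaviour.
 */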
6034#if 0 /*unused*/
6035/**
6036 * Looks up a memory mapping entry.
6037 *
6038 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6040 * @param pvMem The memory address.
6041 * @param fAccess The access type and purpose (IEM_ACCESS_XXX) to match.
6042 */
6043DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
6044{
6045 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6046 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6047 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
6048 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6049 return 0;
6050 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
6051 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6052 return 1;
6053 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
6054 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6055 return 2;
6056 return VERR_NOT_FOUND;
6057}
6058#endif
6059
6060/**
6061 * Finds a free memmap entry when the iNextMapping hint doesn't work.
6062 *
6063 * @returns Memory mapping index, 1024 on failure.
6064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6065 */
6066static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
6067{
6068 /*
6069 * The easy case.
6070 */
6071 if (pVCpu->iem.s.cActiveMappings == 0)
6072 {
6073 pVCpu->iem.s.iNextMapping = 1;
6074 return 0;
6075 }
6076
6077 /* There should be enough mappings for all instructions. */
6078 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
6079
6080 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
6081 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6082 return i;
6083
6084 AssertFailedReturn(1024);
6085}
6086
6087
6088/**
6089 * Commits a bounce buffer that needs writing back and unmaps it.
6090 *
6091 * @returns Strict VBox status code.
6092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6093 * @param iMemMap The index of the buffer to commit.
6094 * @param fPostponeFail Whether we can postpone write failures to ring-3.
6095 * Always false in ring-3, obviously.
6096 */
6097static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
6098{
6099 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6100 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6101#ifdef IN_RING3
6102 Assert(!fPostponeFail);
6103 RT_NOREF_PV(fPostponeFail);
6104#endif
6105
6106 /*
6107 * Do the writing.
6108 */
6109 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6110 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
6111 {
6112 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
6113 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6114 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6115 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6116 {
6117 /*
6118 * Carefully and efficiently dealing with access handler return
6119 * codes makes this a little bloated.
6120 */
6121 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6122 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6123 pbBuf,
6124 cbFirst,
6125 PGMACCESSORIGIN_IEM);
6126 if (rcStrict == VINF_SUCCESS)
6127 {
6128 if (cbSecond)
6129 {
6130 rcStrict = PGMPhysWrite(pVM,
6131 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6132 pbBuf + cbFirst,
6133 cbSecond,
6134 PGMACCESSORIGIN_IEM);
6135 if (rcStrict == VINF_SUCCESS)
6136 { /* nothing */ }
6137 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6138 {
6139 LogEx(LOG_GROUP_IEM,
6140 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6141 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6142 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6143 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6144 }
6145#ifndef IN_RING3
6146 else if (fPostponeFail)
6147 {
6148 LogEx(LOG_GROUP_IEM,
6149 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6150 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6151 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6152 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6153 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6154 return iemSetPassUpStatus(pVCpu, rcStrict);
6155 }
6156#endif
6157 else
6158 {
6159 LogEx(LOG_GROUP_IEM,
6160 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6161 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6162 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6163 return rcStrict;
6164 }
6165 }
6166 }
6167 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6168 {
6169 if (!cbSecond)
6170 {
6171 LogEx(LOG_GROUP_IEM,
6172 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6173 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6174 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6175 }
6176 else
6177 {
6178 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6179 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6180 pbBuf + cbFirst,
6181 cbSecond,
6182 PGMACCESSORIGIN_IEM);
6183 if (rcStrict2 == VINF_SUCCESS)
6184 {
6185 LogEx(LOG_GROUP_IEM,
6186 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6187 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6188 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6189 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6190 }
6191 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6192 {
6193 LogEx(LOG_GROUP_IEM,
6194 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6195 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6196 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6197 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6198 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6199 }
6200#ifndef IN_RING3
6201 else if (fPostponeFail)
6202 {
6203 LogEx(LOG_GROUP_IEM,
6204 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6205 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6206 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6207 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6208 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6209 return iemSetPassUpStatus(pVCpu, rcStrict);
6210 }
6211#endif
6212 else
6213 {
6214 LogEx(LOG_GROUP_IEM,
6215 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6216 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6217 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6218 return rcStrict2;
6219 }
6220 }
6221 }
6222#ifndef IN_RING3
6223 else if (fPostponeFail)
6224 {
6225 LogEx(LOG_GROUP_IEM,
6226 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6227 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6228 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6229 if (!cbSecond)
6230 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6231 else
6232 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6233 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6234 return iemSetPassUpStatus(pVCpu, rcStrict);
6235 }
6236#endif
6237 else
6238 {
6239 LogEx(LOG_GROUP_IEM,
6240 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6241 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6242 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6243 return rcStrict;
6244 }
6245 }
6246 else
6247 {
6248 /*
6249 * No access handlers, much simpler.
6250 */
6251 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6252 if (RT_SUCCESS(rc))
6253 {
6254 if (cbSecond)
6255 {
6256 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6257 if (RT_SUCCESS(rc))
6258 { /* likely */ }
6259 else
6260 {
6261 LogEx(LOG_GROUP_IEM,
6262 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6263 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6264 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6265 return rc;
6266 }
6267 }
6268 }
6269 else
6270 {
6271 LogEx(LOG_GROUP_IEM,
6272 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6273 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6274 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6275 return rc;
6276 }
6277 }
6278 }
6279
6280#if defined(IEM_LOG_MEMORY_WRITES)
6281 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6282 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6283 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6284 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6285 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6286 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6287
6288 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6289 g_cbIemWrote = cbWrote;
6290 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6291#endif
6292
6293 /*
6294 * Free the mapping entry.
6295 */
6296 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6297 Assert(pVCpu->iem.s.cActiveMappings != 0);
6298 pVCpu->iem.s.cActiveMappings--;
6299 return VINF_SUCCESS;
6300}
6301
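/*
 * The commit above boils down to the following shape, sketched here with
 * hypothetical names and a generic writer callback, ignoring the access
 * handler status codes and the postpone-to-ring-3 handling:
 */
#if 0
static int exampleCommitBounceBuffer(void *pvUser, uint8_t const *pbBuf,
                                     uint64_t GCPhysFirst, uint32_t cbFirst,
                                     uint64_t GCPhysSecond, uint32_t cbSecond,
                                     int (*pfnWrite)(void *pvUser, uint64_t GCPhys, void const *pv, uint32_t cb))
{
    int rc = pfnWrite(pvUser, GCPhysFirst, pbBuf, cbFirst);             /* the part on the first page */
    if (rc == 0 && cbSecond)
        rc = pfnWrite(pvUser, GCPhysSecond, pbBuf + cbFirst, cbSecond); /* the spill-over on the second page */
    return rc;
}
#endif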
6302
6303/**
6304 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6305 */
6306DECL_FORCE_INLINE(uint32_t)
6307iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6308{
6309 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6310 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6311 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6312 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6313}
6314
6315
6316/**
6317 * iemMemMap worker that deals with a request crossing pages.
6318 */
6319static VBOXSTRICTRC
6320iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6321 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6322{
6323 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6324 Assert(cbMem <= GUEST_PAGE_SIZE);
6325
6326 /*
6327 * Do the address translations.
6328 */
6329 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6330 RTGCPHYS GCPhysFirst;
6331 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6332 if (rcStrict != VINF_SUCCESS)
6333 return rcStrict;
6334 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6335
6336 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6337 RTGCPHYS GCPhysSecond;
6338 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6339 cbSecondPage, fAccess, &GCPhysSecond);
6340 if (rcStrict != VINF_SUCCESS)
6341 return rcStrict;
6342 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6343 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6344
6345 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6346
6347 /*
6348 * Check for data breakpoints.
6349 */
6350 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6351 { /* likely */ }
6352 else
6353 {
6354 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6355 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6356 cbSecondPage, fAccess);
6357 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6358 if (fDataBps > 1)
6359 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6360 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6361 }
6362
6363 /*
6364 * Read in the current memory content if it's a read, execute or partial
6365 * write access.
6366 */
6367 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6368
6369 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6370 {
6371 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6372 {
6373 /*
6374 * Must carefully deal with access handler status codes here,
6375 * which makes the code a bit bloated.
6376 */
6377 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6378 if (rcStrict == VINF_SUCCESS)
6379 {
6380 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6381 if (rcStrict == VINF_SUCCESS)
6382 { /*likely */ }
6383 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6384 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6385 else
6386 {
6387 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6388 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6389 return rcStrict;
6390 }
6391 }
6392 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6393 {
6394 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6395 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6396 {
6397 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6398 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6399 }
6400 else
6401 {
6402 LogEx(LOG_GROUP_IEM,
6403 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6404 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6405 return rcStrict2;
6406 }
6407 }
6408 else
6409 {
6410 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6411 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6412 return rcStrict;
6413 }
6414 }
6415 else
6416 {
6417 /*
6418 * No informational status codes here, much more straightforward.
6419 */
6420 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6421 if (RT_SUCCESS(rc))
6422 {
6423 Assert(rc == VINF_SUCCESS);
6424 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6425 if (RT_SUCCESS(rc))
6426 Assert(rc == VINF_SUCCESS);
6427 else
6428 {
6429 LogEx(LOG_GROUP_IEM,
6430 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6431 return rc;
6432 }
6433 }
6434 else
6435 {
6436 LogEx(LOG_GROUP_IEM,
6437 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6438 return rc;
6439 }
6440 }
6441 }
6442#ifdef VBOX_STRICT
6443 else
6444 memset(pbBuf, 0xcc, cbMem);
6445 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6446 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6447#endif
6448 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6449
6450 /*
6451 * Commit the bounce buffer entry.
6452 */
6453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6455 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6456 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6457 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6458 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6459 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6460 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6461 pVCpu->iem.s.cActiveMappings++;
6462
6463 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6464 *ppvMem = pbBuf;
6465 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6466 return VINF_SUCCESS;
6467}
6468
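/*
 * Worked example of the split above, assuming 4 KiB guest pages: an 8 byte
 * access at GCPtrFirst = 0x....0ffa gives
 *      cbFirstPage  = 0x1000 - 0xffa = 6 bytes on the first page,
 *      cbSecondPage = 8 - 6          = 2 bytes on the second page,
 * and the second translation is done for (GCPtrFirst + cbMem - 1) masked
 * down to the page boundary, i.e. the start of the following page.
 */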
6469
6470/**
6471 * iemMemMap worker that deals with iemMemPageMap failures.
6472 */
6473static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6474 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6475{
6476 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6477
6478 /*
6479 * Filter out conditions we can handle and the ones which shouldn't happen.
6480 */
6481 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6482 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6483 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6484 {
6485 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6486 return rcMap;
6487 }
6488 pVCpu->iem.s.cPotentialExits++;
6489
6490 /*
6491 * Read in the current memory content if it's a read, execute or partial
6492 * write access.
6493 */
6494 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6495 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6496 {
6497 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6498 memset(pbBuf, 0xff, cbMem);
6499 else
6500 {
6501 int rc;
6502 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6503 {
6504 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6505 if (rcStrict == VINF_SUCCESS)
6506 { /* nothing */ }
6507 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6508 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6509 else
6510 {
6511 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6512 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6513 return rcStrict;
6514 }
6515 }
6516 else
6517 {
6518 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6519 if (RT_SUCCESS(rc))
6520 { /* likely */ }
6521 else
6522 {
6523 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6524 GCPhysFirst, rc));
6525 return rc;
6526 }
6527 }
6528 }
6529 }
6530#ifdef VBOX_STRICT
6531 else
6532 memset(pbBuf, 0xcc, cbMem);
6535 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6536 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6537#endif
6538
6539 /*
6540 * Commit the bounce buffer entry.
6541 */
6542 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6543 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6544 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6545 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6546 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6547 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6548 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6549 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6550 pVCpu->iem.s.cActiveMappings++;
6551
6552 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6553 *ppvMem = pbBuf;
6554 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6555 return VINF_SUCCESS;
6556}
6557
6558
6559
6560/**
6561 * Maps the specified guest memory for the given kind of access.
6562 *
6563 * This may be using bounce buffering of the memory if it's crossing a page
6564 * boundary or if there is an access handler installed for any of it. Because
6565 * of lock prefix guarantees, we're in for some extra clutter when this
6566 * happens.
6567 *
6568 * This may raise a \#GP, \#SS, \#PF or \#AC.
6569 *
6570 * @returns VBox strict status code.
6571 *
6572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6573 * @param ppvMem Where to return the pointer to the mapped memory.
6574 * @param pbUnmapInfo Where to return unmap info to be passed to
6575 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6576 * done.
6577 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6578 * 8, 12, 16, 32 or 512. When used by string operations
6579 * it can be up to a page.
6580 * @param iSegReg The index of the segment register to use for this
6581 * access. The base and limits are checked. Use UINT8_MAX
6582 * to indicate that no segmentation is required (for IDT,
6583 * GDT and LDT accesses).
6584 * @param GCPtrMem The address of the guest memory.
6585 * @param fAccess How the memory is being accessed. The
6586 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6587 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6588 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6589 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6590 * set.
6591 * @param uAlignCtl Alignment control:
6592 * - Bits 15:0 is the alignment mask.
6593 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6594 * IEM_MEMMAP_F_ALIGN_SSE, and
6595 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6596 * Pass zero to skip alignment.
6597 */
6598VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6599 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6600{
6601 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6602
6603 /*
6604 * Check the input and figure out which mapping entry to use.
6605 */
6606 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6607 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6608 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6609 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6610 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6611
6612 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6613 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6614 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6615 {
6616 iMemMap = iemMemMapFindFree(pVCpu);
6617 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6618 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6619 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6620 pVCpu->iem.s.aMemMappings[2].fAccess),
6621 VERR_IEM_IPE_9);
6622 }
6623
6624 /*
6625 * Map the memory, checking that we can actually access it. If something
6626 * slightly complicated happens, fall back on bounce buffering.
6627 */
6628 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6629 if (rcStrict == VINF_SUCCESS)
6630 { /* likely */ }
6631 else
6632 return rcStrict;
6633
6634 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6635 { /* likely */ }
6636 else
6637 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6638
6639 /*
6640 * Alignment check.
6641 */
6642 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6643 { /* likelyish */ }
6644 else
6645 {
6646 /* Misaligned access. */
6647 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6648 {
6649 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6650 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6651 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6652 {
6653 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6654
6655 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6656 { /* likely */ }
6657 else
6658 return iemRaiseAlignmentCheckException(pVCpu);
6659 }
6660 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6661 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6662 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6663 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6664 * that's what FXSAVE does on a 10980xe. */
6665 && iemMemAreAlignmentChecksEnabled(pVCpu))
6666 return iemRaiseAlignmentCheckException(pVCpu);
6667 else
6668 return iemRaiseGeneralProtectionFault0(pVCpu);
6669 }
6670
6671#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6672 /* If the access is atomic there are host platform alignment restrictions
6673 we need to conform with. */
6674 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6675# if defined(RT_ARCH_AMD64)
6676 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6677# elif defined(RT_ARCH_ARM64)
6678 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6679# else
6680# error port me
6681# endif
6682 )
6683 { /* okay */ }
6684 else
6685 {
6686 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6687 pVCpu->iem.s.cMisalignedAtomics += 1;
6688 return VINF_EM_EMULATE_SPLIT_LOCK;
6689 }
6690#endif
6691 }
6692
6693#ifdef IEM_WITH_DATA_TLB
6694 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6695
6696 /*
6697 * Get the TLB entry for this page and check PT flags.
6698 *
6699 * We reload the TLB entry if we need to set the dirty bit (accessed
6700 * should in theory always be set).
6701 */
6702 uint8_t *pbMem = NULL;
6703 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6704 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6705 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
6706 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6707 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6708 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6709 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6710 {
6711# ifdef IEM_WITH_TLB_STATISTICS
6712 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
6713# endif
6714
6715 /* If the page is either supervisor only or non-writable, we need to do
6716 more careful access checks. */
6717 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6718 {
6719 /* Write to read only memory? */
6720 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6721 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6722 && ( ( IEM_GET_CPL(pVCpu) == 3
6723 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6724 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6725 {
6726 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6727 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6728 }
6729
6730 /* Kernel memory accessed by userland? */
6731 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6732 && IEM_GET_CPL(pVCpu) == 3
6733 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6734 {
6735 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6736 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6737 }
6738 }
6739
6740 /* Look up the physical page info if necessary. */
6741 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6742# ifdef IN_RING3
6743 pbMem = pTlbe->pbMappingR3;
6744# else
6745 pbMem = NULL;
6746# endif
6747 else
6748 {
6749 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6750 { /* likely */ }
6751 else
6752 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6753 pTlbe->pbMappingR3 = NULL;
6754 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6755 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6756 &pbMem, &pTlbe->fFlagsAndPhysRev);
6757 AssertRCReturn(rc, rc);
6758# ifdef IN_RING3
6759 pTlbe->pbMappingR3 = pbMem;
6760# endif
6761 }
6762 }
6763 else
6764 {
6765 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
6766
6767 /* This page table walking will set A bits as required by the access while performing the walk.
6768 ASSUMES these are set when the address is translated rather than on commit... */
6769 /** @todo testcase: check when A bits are actually set by the CPU for code. */
6770 PGMPTWALKFAST WalkFast;
6771 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6772 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6773 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6774 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6775 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6776 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6777 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6778 fQPage |= PGMQPAGE_F_USER_MODE;
6779 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6780 if (RT_SUCCESS(rc))
6781 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6782 else
6783 {
6784 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6785# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6786 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6787 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6788# endif
6789 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6790 }
6791
6792 uint32_t fDataBps;
6793 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
6794 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
6795 {
6796 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
6797 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
6798 {
6799 pTlbe--;
6800 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
6801 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6802 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6803 IEMTLBTRACE_LOAD(pVCpu, GCPtrMem, true);
6804 }
6805 else
6806 {
6807 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
6808 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
6809 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6810 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6811 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, true);
6812 }
6813 }
6814 else
6815 {
6816 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
6817 to the page with the data access breakpoint armed on it to pass thru here. */
6818 if (fDataBps > 1)
6819 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6820 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6821 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6822 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
6823 pTlbe->uTag = uTagNoRev;
6824 }
6825 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
6826 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
6827 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6828 pTlbe->GCPhys = GCPhysPg;
6829 pTlbe->pbMappingR3 = NULL;
6830 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6831 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6832 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6833 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6834 || IEM_GET_CPL(pVCpu) != 3
6835 || (fAccess & IEM_ACCESS_WHAT_SYS));
6836
6837 /* Resolve the physical address. */
6838 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6839 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6840 &pbMem, &pTlbe->fFlagsAndPhysRev);
6841 AssertRCReturn(rc, rc);
6842# ifdef IN_RING3
6843 pTlbe->pbMappingR3 = pbMem;
6844# endif
6845 }
6846
6847 /*
6848 * Check the physical page level access and mapping.
6849 */
6850 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6851 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6852 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6853 { /* probably likely */ }
6854 else
6855 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6856 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6857 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6858 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6859 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6860 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6861
6862 if (pbMem)
6863 {
6864 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6865 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6866 fAccess |= IEM_ACCESS_NOT_LOCKED;
6867 }
6868 else
6869 {
6870 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6871 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6872 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6873 if (rcStrict != VINF_SUCCESS)
6874 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6875 }
6876
6877 void * const pvMem = pbMem;
6878
6879 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6880 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6881 if (fAccess & IEM_ACCESS_TYPE_READ)
6882 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6883
6884#else /* !IEM_WITH_DATA_TLB */
6885
6886 RTGCPHYS GCPhysFirst;
6887 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6888 if (rcStrict != VINF_SUCCESS)
6889 return rcStrict;
6890
6891 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6892 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6893 if (fAccess & IEM_ACCESS_TYPE_READ)
6894 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6895
6896 void *pvMem;
6897 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6898 if (rcStrict != VINF_SUCCESS)
6899 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6900
6901#endif /* !IEM_WITH_DATA_TLB */
6902
6903 /*
6904 * Fill in the mapping table entry.
6905 */
6906 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6907 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6908 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6909 pVCpu->iem.s.cActiveMappings += 1;
6910
6911 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6912 *ppvMem = pvMem;
6913 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6914 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6915 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6916
6917 return VINF_SUCCESS;
6918}
6919
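/*
 * Typical calling pattern for iemMemMap, as an illustrative sketch only; the
 * exampleOrU16 name is made up and the access/alignment arguments shown
 * (IEM_ACCESS_DATA_RW, a sizeof-1 alignment mask) are examples rather than a
 * statement about any particular instruction's implementation:
 */
#if 0
static VBOXSTRICTRC exampleOrU16(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t fBitsToSet)
{
    uint16_t    *pu16Dst    = NULL;
    uint8_t      bUnmapInfo = 0;
    VBOXSTRICTRC rcStrict   = iemMemMap(pVCpu, (void **)&pu16Dst, &bUnmapInfo, sizeof(*pu16Dst),
                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_RW, sizeof(*pu16Dst) - 1);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Dst |= fBitsToSet;                               /* modify the mapped (or bounce buffered) bytes */
        rcStrict  = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);  /* write back if bounce buffered, then unmap */
    }
    return rcStrict;
}
#endif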
6920
6921/**
6922 * Commits the guest memory if bounce buffered and unmaps it.
6923 *
6924 * @returns Strict VBox status code.
6925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6926 * @param bUnmapInfo Unmap info set by iemMemMap.
6927 */
6928VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6929{
6930 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6931 AssertMsgReturn( (bUnmapInfo & 0x08)
6932 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6933 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6934 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6935 VERR_NOT_FOUND);
6936
6937 /* If it's bounce buffered, we may need to write back the buffer. */
6938 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6939 {
6940 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6941 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6942 }
6943 /* Otherwise unlock it. */
6944 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6945 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6946
6947 /* Free the entry. */
6948 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6949 Assert(pVCpu->iem.s.cActiveMappings != 0);
6950 pVCpu->iem.s.cActiveMappings--;
6951 return VINF_SUCCESS;
6952}
6953
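/*
 * Layout of bUnmapInfo as produced by iemMemMap and checked above (a summary
 * of the encoding visible in this file, not a new definition):
 *      bits 2:0 - index into aMemMappings (fewer than 8 entries),
 *      bit  3   - always set, marking the value as valid,
 *      bits 7:4 - the IEM_ACCESS_TYPE_MASK portion of fAccess (fits in 4 bits).
 */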
6954
6955/**
6956 * Rolls back the guest memory (conceptually only) and unmaps it.
6957 *
6958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6959 * @param bUnmapInfo Unmap info set by iemMemMap.
6960 */
6961void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6962{
6963 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6964 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6965 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6966 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6967 == ((unsigned)bUnmapInfo >> 4),
6968 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6969
6970 /* Unlock it if necessary. */
6971 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6972 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6973
6974 /* Free the entry. */
6975 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6976 Assert(pVCpu->iem.s.cActiveMappings != 0);
6977 pVCpu->iem.s.cActiveMappings--;
6978}
6979
6980#ifdef IEM_WITH_SETJMP
6981
6982/**
6983 * Maps the specified guest memory for the given kind of access, longjmp on
6984 * error.
6985 *
6986 * This may be using bounce buffering of the memory if it's crossing a page
6987 * boundary or if there is an access handler installed for any of it. Because
6988 * of lock prefix guarantees, we're in for some extra clutter when this
6989 * happens.
6990 *
6991 * This may raise a \#GP, \#SS, \#PF or \#AC.
6992 *
6993 * @returns Pointer to the mapped memory.
6994 *
6995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6996 * @param bUnmapInfo Where to return unmap info to be passed to
6997 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6998 * iemMemCommitAndUnmapWoSafeJmp,
6999 * iemMemCommitAndUnmapRoSafeJmp,
7000 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
7001 * when done.
7002 * @param cbMem The number of bytes to map. This is usually 1,
7003 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7004 * string operations it can be up to a page.
7005 * @param iSegReg The index of the segment register to use for
7006 * this access. The base and limits are checked.
7007 * Use UINT8_MAX to indicate that no segmentation
7008 * is required (for IDT, GDT and LDT accesses).
7009 * @param GCPtrMem The address of the guest memory.
7010 * @param fAccess How the memory is being accessed. The
7011 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
7012 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
7013 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
7014 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
7015 * set.
7016 * @param uAlignCtl Alignment control:
7017 * - Bits 15:0 is the alignment mask.
7018 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
7019 * IEM_MEMMAP_F_ALIGN_SSE, and
7020 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
7021 * Pass zero to skip alignment.
7022 * @tparam a_fSafe Whether this is a call from "safe" fallback function in
7023 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
7024 * needs counting as such in the statistics.
7025 */
7026template<bool a_fSafeCall = false>
7027static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7028 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7029{
7030 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
7031
7032 /*
7033 * Check the input, check segment access and adjust address
7034 * with segment base.
7035 */
7036 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7037 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
7038 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7039
7040 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7041 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7042 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7043
7044 /*
7045 * Alignment check.
7046 */
7047 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
7048 { /* likelyish */ }
7049 else
7050 {
7051 /* Misaligned access. */
7052 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7053 {
7054 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
7055 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
7056 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
7057 {
7058 AssertCompile(X86_CR0_AM == X86_EFL_AC);
7059
7060 if (iemMemAreAlignmentChecksEnabled(pVCpu))
7061 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7062 }
7063 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
7064 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
7065 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
7066 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
7067 * that's what FXSAVE does on a 10980xe. */
7068 && iemMemAreAlignmentChecksEnabled(pVCpu))
7069 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7070 else
7071 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
7072 }
7073
7074#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
7075 /* If the access is atomic there are host platform alignment restrictions
7076 we need to conform with. */
7077 if ( !(fAccess & IEM_ACCESS_ATOMIC)
7078# if defined(RT_ARCH_AMD64)
7079 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
7080# elif defined(RT_ARCH_ARM64)
7081 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
7082# else
7083# error port me
7084# endif
7085 )
7086 { /* okay */ }
7087 else
7088 {
7089 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
7090 pVCpu->iem.s.cMisalignedAtomics += 1;
7091 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
7092 }
7093#endif
7094 }
7095
7096 /*
7097 * Figure out which mapping entry to use.
7098 */
7099 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
7100 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7101 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7102 {
7103 iMemMap = iemMemMapFindFree(pVCpu);
7104 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
7105 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
7106 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
7107 pVCpu->iem.s.aMemMappings[2].fAccess),
7108 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
7109 }
7110
7111 /*
7112 * Crossing a page boundary?
7113 */
7114 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
7115 { /* No (likely). */ }
7116 else
7117 {
7118 void *pvMem;
7119 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
7120 if (rcStrict == VINF_SUCCESS)
7121 return pvMem;
7122 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7123 }
7124
7125#ifdef IEM_WITH_DATA_TLB
7126 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
7127
7128 /*
7129 * Get the TLB entry for this page checking that it has the A & D bits
7130 * set as per fAccess flags.
7131 */
7132 /** @todo make the caller pass these in with fAccess. */
7133 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
7134 ? IEMTLBE_F_PT_NO_USER : 0;
7135 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
7136 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
7137 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
7138 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7139 ? IEMTLBE_F_PT_NO_WRITE : 0)
7140 : 0;
7141 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
7142 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
7143 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
7144 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
7145 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
7146 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
7147 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
7148 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
7149 {
7150# ifdef IEM_WITH_TLB_STATISTICS
7151 if (a_fSafeCall)
7152 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
7153 else
7154 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7155# endif
7156 }
7157 else
7158 {
7159 if (a_fSafeCall)
7160 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
7161 else
7162 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7163
7164 /* This page table walking will set A and D bits as required by the
7165 access while performing the walk.
7166 ASSUMES these are set when the address is translated rather than on commit... */
7167 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7168 PGMPTWALKFAST WalkFast;
7169 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7170 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7171 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7172 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7173 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7174 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7175 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7176 fQPage |= PGMQPAGE_F_USER_MODE;
7177 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7178 if (RT_SUCCESS(rc))
7179 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7180 else
7181 {
7182 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7183# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7184 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7185 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7186# endif
7187 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7188 }
7189
7190 uint32_t fDataBps;
7191 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7192 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7193 {
7194 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7195 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7196 {
7197 pTlbe--;
7198 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7199 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7200 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7201 IEMTLBTRACE_LOAD(pVCpu, GCPtrMem, true);
7202 }
7203 else
7204 {
7205 if (a_fSafeCall)
7206 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7207 else
7208 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7209 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7210 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7211 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7212 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, true);
7213 }
7214 }
7215 else
7216 {
7217 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7218 to the page with the data access breakpoint armed on it to pass thru here. */
7219 if (fDataBps > 1)
7220 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7221 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7222 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7223 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7224 pTlbe->uTag = uTagNoRev;
7225 }
7226 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7227 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7228 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7229 pTlbe->GCPhys = GCPhysPg;
7230 pTlbe->pbMappingR3 = NULL;
7231 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7232 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
7233 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7234
7235 /* Resolve the physical address. */
7236 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7237 uint8_t *pbMemFullLoad = NULL;
7238 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7239 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7240 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7241# ifdef IN_RING3
7242 pTlbe->pbMappingR3 = pbMemFullLoad;
7243# endif
7244 }
7245
7246 /*
7247 * Check the flags and physical revision.
7248 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7249 * just to keep the code structure simple (i.e. avoid gotos or similar).
7250 */
7251 uint8_t *pbMem;
7252 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7253 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7254# ifdef IN_RING3
7255 pbMem = pTlbe->pbMappingR3;
7256# else
7257 pbMem = NULL;
7258# endif
7259 else
7260 {
7261 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7262
7263 /*
7264 * Okay, something isn't quite right or needs refreshing.
7265 */
7266 /* Write to read only memory? */
7267 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7268 {
7269 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7270# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7271/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
7272 * to trigger an \#PG or a VM nested paging exit here yet! */
7273 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7274 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7275# endif
7276 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7277 }
7278
7279 /* Kernel memory accessed by userland? */
7280 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7281 {
7282 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7283# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7284/** @todo TLB: See above. */
7285 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7286 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7287# endif
7288 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7289 }
7290
7291 /*
7292 * Check if the physical page info needs updating.
7293 */
7294 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7295# ifdef IN_RING3
7296 pbMem = pTlbe->pbMappingR3;
7297# else
7298 pbMem = NULL;
7299# endif
7300 else
7301 {
7302 pTlbe->pbMappingR3 = NULL;
7303 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7304 pbMem = NULL;
7305 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7306 &pbMem, &pTlbe->fFlagsAndPhysRev);
7307 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7308# ifdef IN_RING3
7309 pTlbe->pbMappingR3 = pbMem;
7310# endif
7311 }
7312
7313 /*
7314 * Check the physical page level access and mapping.
7315 */
7316 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7317 { /* probably likely */ }
7318 else
7319 {
7320 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7321 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7322 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7323 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7324 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7325 if (rcStrict == VINF_SUCCESS)
7326 return pbMem;
7327 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7328 }
7329 }
7330 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7331
7332 if (pbMem)
7333 {
7334 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7335 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7336 fAccess |= IEM_ACCESS_NOT_LOCKED;
7337 }
7338 else
7339 {
7340 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7341 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7342 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7343 if (rcStrict == VINF_SUCCESS)
7344 {
7345 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7346 return pbMem;
7347 }
7348 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7349 }
7350
7351 void * const pvMem = pbMem;
7352
7353 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7354 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7355 if (fAccess & IEM_ACCESS_TYPE_READ)
7356 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7357
7358#else /* !IEM_WITH_DATA_TLB */
7359
7360
7361 RTGCPHYS GCPhysFirst;
7362 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7363 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7364 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7365
7366 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7367 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7368 if (fAccess & IEM_ACCESS_TYPE_READ)
7369 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7370
7371 void *pvMem;
7372 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7373 if (rcStrict == VINF_SUCCESS)
7374 { /* likely */ }
7375 else
7376 {
7377 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7378 if (rcStrict == VINF_SUCCESS)
7379 return pvMem;
7380 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7381 }
7382
7383#endif /* !IEM_WITH_DATA_TLB */
7384
7385 /*
7386 * Fill in the mapping table entry.
7387 */
7388 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7389 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7390 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7391 pVCpu->iem.s.cActiveMappings++;
7392
7393 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7394
7395 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7396 return pvMem;
7397}
7398
7399
7400/** @see iemMemMapJmp */
7401static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7402 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7403{
7404 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7405}
7406
7407
7408/**
7409 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7410 *
7411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7412 * @param bUnmapInfo Unmap info set by the iemMemMap/iemMemMapJmp call that
7413 * created the mapping.
7414 */
7415void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7416{
7417 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7418 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7419 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7420 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7421 == ((unsigned)bUnmapInfo >> 4),
7422 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7423
7424 /* If it's bounce buffered, we may need to write back the buffer. */
7425 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7426 {
7427 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7428 {
7429 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7430 if (rcStrict == VINF_SUCCESS)
7431 return;
7432 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7433 }
7434 }
7435 /* Otherwise unlock it. */
7436 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7437 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7438
7439 /* Free the entry. */
7440 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7441 Assert(pVCpu->iem.s.cActiveMappings != 0);
7442 pVCpu->iem.s.cActiveMappings--;
7443}
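/** @note Illustrative sketch (not part of the build) of the bUnmapInfo layout the
 *        check above relies on; field names here are informal:
 * @code
 *      uintptr_t const iMemMap  = bUnmapInfo & 0x7;           // bits 0..2: aMemMappings index
 *      bool const      fMapped  = RT_BOOL(bUnmapInfo & 0x08); // bit 3:     set for every valid mapping
 *      unsigned const  fAccType = bUnmapInfo >> 4;            // bits 4..7: IEM_ACCESS_TYPE_* of the access
 * @endcode
 */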
7444
7445
7446/** Fallback for iemMemCommitAndUnmapRwJmp. */
7447void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7448{
7449 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7450 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7451}
7452
7453
7454/** Fallback for iemMemCommitAndUnmapAtJmp. */
7455void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7456{
7457 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7458 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7459}
7460
7461
7462/** Fallback for iemMemCommitAndUnmapWoJmp. */
7463void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7464{
7465 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7466 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7467}
7468
7469
7470/** Fallback for iemMemCommitAndUnmapRoJmp. */
7471void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7472{
7473 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7474 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7475}
7476
7477
7478/** Fallback for iemMemRollbackAndUnmapWo. */
7479void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7480{
7481 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7482 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7483}
7484
7485#endif /* IEM_WITH_SETJMP */
7486
7487#ifndef IN_RING3
7488/**
7489 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7490 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
7491 *
7492 * Allows the instruction to be completed and retired, while the IEM user will
7493 * return to ring-3 immediately afterwards and do the postponed writes there.
7494 *
7495 * @returns VBox status code (no strict statuses). Caller must check
7496 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7498 * @param bUnmapInfo Unmap info set by the iemMemMap call that created the
7499 * mapping.
7500 */
7501VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7502{
7503 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7504 AssertMsgReturn( (bUnmapInfo & 0x08)
7505 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7506 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7507 == ((unsigned)bUnmapInfo >> 4),
7508 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7509 VERR_NOT_FOUND);
7510
7511 /* If it's bounce buffered, we may need to write back the buffer. */
7512 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7513 {
7514 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7515 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7516 }
7517 /* Otherwise unlock it. */
7518 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7519 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7520
7521 /* Free the entry. */
7522 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7523 Assert(pVCpu->iem.s.cActiveMappings != 0);
7524 pVCpu->iem.s.cActiveMappings--;
7525 return VINF_SUCCESS;
7526}
7527#endif
7528
7529
7530/**
7531 * Rolls back mappings, releasing page locks and such.
7532 *
7533 * The caller shall only call this after checking cActiveMappings.
7534 *
7535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7536 */
7537void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7538{
7539 Assert(pVCpu->iem.s.cActiveMappings > 0);
7540
7541 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7542 while (iMemMap-- > 0)
7543 {
7544 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7545 if (fAccess != IEM_ACCESS_INVALID)
7546 {
7547 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7548 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7549 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7550 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7551 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7552 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7553 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7554 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7555 pVCpu->iem.s.cActiveMappings--;
7556 }
7557 }
7558}
7559
7560
7561/*
7562 * Instantiate R/W templates.
7563 */
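/* Each block below sets TMPL_MEM_TYPE, TMPL_MEM_FN_SUFF and friends and then includes
   the template to instantiate the iemMemFetchData<Suff> / iemMemStoreData<Suff> helpers
   for that type (roughly speaking; the exact set depends on the template and on whether
   TMPL_MEM_WITH_STACK is defined, which adds the corresponding stack helpers). */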
7564#define TMPL_MEM_WITH_STACK
7565
7566#define TMPL_MEM_TYPE uint8_t
7567#define TMPL_MEM_FN_SUFF U8
7568#define TMPL_MEM_FMT_TYPE "%#04x"
7569#define TMPL_MEM_FMT_DESC "byte"
7570#include "IEMAllMemRWTmpl.cpp.h"
7571
7572#define TMPL_MEM_TYPE uint16_t
7573#define TMPL_MEM_FN_SUFF U16
7574#define TMPL_MEM_FMT_TYPE "%#06x"
7575#define TMPL_MEM_FMT_DESC "word"
7576#include "IEMAllMemRWTmpl.cpp.h"
7577
7578#define TMPL_WITH_PUSH_SREG
7579#define TMPL_MEM_TYPE uint32_t
7580#define TMPL_MEM_FN_SUFF U32
7581#define TMPL_MEM_FMT_TYPE "%#010x"
7582#define TMPL_MEM_FMT_DESC "dword"
7583#include "IEMAllMemRWTmpl.cpp.h"
7584#undef TMPL_WITH_PUSH_SREG
7585
7586#define TMPL_MEM_TYPE uint64_t
7587#define TMPL_MEM_FN_SUFF U64
7588#define TMPL_MEM_FMT_TYPE "%#018RX64"
7589#define TMPL_MEM_FMT_DESC "qword"
7590#include "IEMAllMemRWTmpl.cpp.h"
7591
7592#undef TMPL_MEM_WITH_STACK
7593
7594#define TMPL_MEM_TYPE uint32_t
7595#define TMPL_MEM_TYPE_ALIGN 0
7596#define TMPL_MEM_FN_SUFF U32NoAc
7597#define TMPL_MEM_FMT_TYPE "%#010x"
7598#define TMPL_MEM_FMT_DESC "dword"
7599#include "IEMAllMemRWTmpl.cpp.h"
7600#undef TMPL_WITH_PUSH_SREG
7601
7602#define TMPL_MEM_TYPE uint64_t
7603#define TMPL_MEM_TYPE_ALIGN 0
7604#define TMPL_MEM_FN_SUFF U64NoAc
7605#define TMPL_MEM_FMT_TYPE "%#018RX64"
7606#define TMPL_MEM_FMT_DESC "qword"
7607#include "IEMAllMemRWTmpl.cpp.h"
7608
7609#define TMPL_MEM_TYPE uint64_t
7610#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7611#define TMPL_MEM_FN_SUFF U64AlignedU128
7612#define TMPL_MEM_FMT_TYPE "%#018RX64"
7613#define TMPL_MEM_FMT_DESC "qword"
7614#include "IEMAllMemRWTmpl.cpp.h"
7615
7616/* See IEMAllMemRWTmplInline.cpp.h */
7617#define TMPL_MEM_BY_REF
7618
7619#define TMPL_MEM_TYPE RTFLOAT80U
7620#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7621#define TMPL_MEM_FN_SUFF R80
7622#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7623#define TMPL_MEM_FMT_DESC "tword"
7624#include "IEMAllMemRWTmpl.cpp.h"
7625
7626#define TMPL_MEM_TYPE RTPBCD80U
7627#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7628#define TMPL_MEM_FN_SUFF D80
7629#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7630#define TMPL_MEM_FMT_DESC "tword"
7631#include "IEMAllMemRWTmpl.cpp.h"
7632
7633#define TMPL_MEM_TYPE RTUINT128U
7634#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7635#define TMPL_MEM_FN_SUFF U128
7636#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7637#define TMPL_MEM_FMT_DESC "dqword"
7638#include "IEMAllMemRWTmpl.cpp.h"
7639
7640#define TMPL_MEM_TYPE RTUINT128U
7641#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7642#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7643#define TMPL_MEM_FN_SUFF U128AlignedSse
7644#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7645#define TMPL_MEM_FMT_DESC "dqword"
7646#include "IEMAllMemRWTmpl.cpp.h"
7647
7648#define TMPL_MEM_TYPE RTUINT128U
7649#define TMPL_MEM_TYPE_ALIGN 0
7650#define TMPL_MEM_FN_SUFF U128NoAc
7651#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7652#define TMPL_MEM_FMT_DESC "dqword"
7653#include "IEMAllMemRWTmpl.cpp.h"
7654
7655#define TMPL_MEM_TYPE RTUINT256U
7656#define TMPL_MEM_TYPE_ALIGN 0
7657#define TMPL_MEM_FN_SUFF U256NoAc
7658#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7659#define TMPL_MEM_FMT_DESC "qqword"
7660#include "IEMAllMemRWTmpl.cpp.h"
7661
7662#define TMPL_MEM_TYPE RTUINT256U
7663#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7664#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7665#define TMPL_MEM_FN_SUFF U256AlignedAvx
7666#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7667#define TMPL_MEM_FMT_DESC "qqword"
7668#include "IEMAllMemRWTmpl.cpp.h"
7669
7670/**
7671 * Fetches a data dword and zero extends it to a qword.
7672 *
7673 * @returns Strict VBox status code.
7674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7675 * @param pu64Dst Where to return the qword.
7676 * @param iSegReg The index of the segment register to use for
7677 * this access. The base and limits are checked.
7678 * @param GCPtrMem The address of the guest memory.
7679 */
7680VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7681{
7682 /* The lazy approach for now... */
7683 uint8_t bUnmapInfo;
7684 uint32_t const *pu32Src;
7685 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7686 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7687 if (rc == VINF_SUCCESS)
7688 {
7689 *pu64Dst = *pu32Src;
7690 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7691 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7692 }
7693 return rc;
7694}
7695
7696
7697#ifdef SOME_UNUSED_FUNCTION
7698/**
7699 * Fetches a data dword and sign extends it to a qword.
7700 *
7701 * @returns Strict VBox status code.
7702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7703 * @param pu64Dst Where to return the sign extended value.
7704 * @param iSegReg The index of the segment register to use for
7705 * this access. The base and limits are checked.
7706 * @param GCPtrMem The address of the guest memory.
7707 */
7708VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7709{
7710 /* The lazy approach for now... */
7711 uint8_t bUnmapInfo;
7712 int32_t const *pi32Src;
7713 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7714 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7715 if (rc == VINF_SUCCESS)
7716 {
7717 *pu64Dst = *pi32Src;
7718 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7719 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7720 }
7721#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7722 else
7723 *pu64Dst = 0;
7724#endif
7725 return rc;
7726}
7727#endif
7728
7729
7730/**
7731 * Fetches a descriptor register (lgdt, lidt).
7732 *
7733 * @returns Strict VBox status code.
7734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7735 * @param pcbLimit Where to return the limit.
7736 * @param pGCPtrBase Where to return the base.
7737 * @param iSegReg The index of the segment register to use for
7738 * this access. The base and limits are checked.
7739 * @param GCPtrMem The address of the guest memory.
7740 * @param enmOpSize The effective operand size.
7741 */
7742VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7743 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7744{
7745 /*
7746 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7747 * little special:
7748 * - The two reads are done separately.
7749 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
7750 * - We suspect the 386 to actually commit the limit before the base in
7751 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7752 * don't try to emulate this eccentric behavior, because it's not well
7753 * enough understood and rather hard to trigger.
7754 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7755 */
7756 VBOXSTRICTRC rcStrict;
7757 if (IEM_IS_64BIT_CODE(pVCpu))
7758 {
7759 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7760 if (rcStrict == VINF_SUCCESS)
7761 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7762 }
7763 else
7764 {
7765 uint32_t uTmp = 0; /* (silence Visual C++ 'maybe used uninitialized' warning) */
7766 if (enmOpSize == IEMMODE_32BIT)
7767 {
7768 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7769 {
7770 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7771 if (rcStrict == VINF_SUCCESS)
7772 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7773 }
7774 else
7775 {
7776 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7777 if (rcStrict == VINF_SUCCESS)
7778 {
7779 *pcbLimit = (uint16_t)uTmp;
7780 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7781 }
7782 }
7783 if (rcStrict == VINF_SUCCESS)
7784 *pGCPtrBase = uTmp;
7785 }
7786 else
7787 {
7788 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7789 if (rcStrict == VINF_SUCCESS)
7790 {
7791 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7792 if (rcStrict == VINF_SUCCESS)
7793 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7794 }
7795 }
7796 }
7797 return rcStrict;
7798}
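/* Summary of the loads done above (derived from the code): 64-bit mode reads a u16
   limit and a u64 base; 32-bit operand size reads a u16 limit and a u32 base (the
   486 doing a dword limit read instead); 16-bit operand size reads a u16 limit and
   a u32 base that is masked down to 24 bits. */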
7799
7800
7801/**
7802 * Stores a data dqword, SSE aligned.
7803 *
7804 * @returns Strict VBox status code.
7805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7806 * @param iSegReg The index of the segment register to use for
7807 * this access. The base and limits are checked.
7808 * @param GCPtrMem The address of the guest memory.
7809 * @param u128Value The value to store.
7810 */
7811VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7812{
7813 /* The lazy approach for now... */
7814 uint8_t bUnmapInfo;
7815 PRTUINT128U pu128Dst;
7816 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7817 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7818 if (rc == VINF_SUCCESS)
7819 {
7820 pu128Dst->au64[0] = u128Value.au64[0];
7821 pu128Dst->au64[1] = u128Value.au64[1];
7822 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7823 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7824 }
7825 return rc;
7826}
7827
7828
7829#ifdef IEM_WITH_SETJMP
7830/**
7831 * Stores a data dqword, SSE aligned.
7832 *
7833 * @returns Strict VBox status code.
7834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7835 * @param iSegReg The index of the segment register to use for
7836 * this access. The base and limits are checked.
7837 * @param GCPtrMem The address of the guest memory.
7838 * @param u128Value The value to store.
7839 */
7840void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7841 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7842{
7843 /* The lazy approach for now... */
7844 uint8_t bUnmapInfo;
7845 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7846 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7847 pu128Dst->au64[0] = u128Value.au64[0];
7848 pu128Dst->au64[1] = u128Value.au64[1];
7849 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7850 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7851}
7852#endif
7853
7854
7855/**
7856 * Stores a data qqword.
7857 *
7858 * @returns Strict VBox status code.
7859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7860 * @param iSegReg The index of the segment register to use for
7861 * this access. The base and limits are checked.
7862 * @param GCPtrMem The address of the guest memory.
7863 * @param pu256Value Pointer to the value to store.
7864 */
7865VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7866{
7867 /* The lazy approach for now... */
7868 uint8_t bUnmapInfo;
7869 PRTUINT256U pu256Dst;
7870 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7871 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7872 if (rc == VINF_SUCCESS)
7873 {
7874 pu256Dst->au64[0] = pu256Value->au64[0];
7875 pu256Dst->au64[1] = pu256Value->au64[1];
7876 pu256Dst->au64[2] = pu256Value->au64[2];
7877 pu256Dst->au64[3] = pu256Value->au64[3];
7878 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7879 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7880 }
7881 return rc;
7882}
7883
7884
7885#ifdef IEM_WITH_SETJMP
7886/**
7887 * Stores a data qqword, longjmp on error.
7888 *
7889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7890 * @param iSegReg The index of the segment register to use for
7891 * this access. The base and limits are checked.
7892 * @param GCPtrMem The address of the guest memory.
7893 * @param pu256Value Pointer to the value to store.
7894 */
7895void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7896{
7897 /* The lazy approach for now... */
7898 uint8_t bUnmapInfo;
7899 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7900 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7901 pu256Dst->au64[0] = pu256Value->au64[0];
7902 pu256Dst->au64[1] = pu256Value->au64[1];
7903 pu256Dst->au64[2] = pu256Value->au64[2];
7904 pu256Dst->au64[3] = pu256Value->au64[3];
7905 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7906 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7907}
7908#endif
7909
7910
7911/**
7912 * Stores a descriptor register (sgdt, sidt).
7913 *
7914 * @returns Strict VBox status code.
7915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7916 * @param cbLimit The limit.
7917 * @param GCPtrBase The base address.
7918 * @param iSegReg The index of the segment register to use for
7919 * this access. The base and limits are checked.
7920 * @param GCPtrMem The address of the guest memory.
7921 */
7922VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7923{
7924 /*
7925 * The SIDT and SGDT instructions actually store the data using two
7926 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7927 * do not respond to opsize prefixes.
7928 */
7929 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7930 if (rcStrict == VINF_SUCCESS)
7931 {
7932 if (IEM_IS_16BIT_CODE(pVCpu))
7933 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7934 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7935 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7936 else if (IEM_IS_32BIT_CODE(pVCpu))
7937 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7938 else
7939 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7940 }
7941 return rcStrict;
7942}
7943
7944
7945/**
7946 * Begin a special stack push (used by interrupt, exceptions and such).
7947 *
7948 * This will raise \#SS or \#PF if appropriate.
7949 *
7950 * @returns Strict VBox status code.
7951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7952 * @param cbMem The number of bytes to push onto the stack.
7953 * @param cbAlign The alignment mask (7, 3, 1).
7954 * @param ppvMem Where to return the pointer to the stack memory.
7955 * As with the other memory functions this could be
7956 * direct access or bounce buffered access, so
7957 * don't commit register changes until the commit call
7958 * succeeds.
7959 * @param pbUnmapInfo Where to store unmap info for
7960 * iemMemStackPushCommitSpecial.
7961 * @param puNewRsp Where to return the new RSP value. This must be
7962 * passed unchanged to
7963 * iemMemStackPushCommitSpecial().
7964 */
7965VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7966 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7967{
7968 Assert(cbMem < UINT8_MAX);
7969 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7970 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7971}
7972
7973
7974/**
7975 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7976 *
7977 * This will update the rSP.
7978 *
7979 * @returns Strict VBox status code.
7980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7981 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7982 * @param uNewRsp The new RSP value returned by
7983 * iemMemStackPushBeginSpecial().
7984 */
7985VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7986{
7987 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7988 if (rcStrict == VINF_SUCCESS)
7989 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7990 return rcStrict;
7991}
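/** @note Minimal caller sketch for the special push protocol (illustrative only;
 *        uErr is a hypothetical error code being pushed during exception delivery):
 * @code
 *      uint32_t    *pu32Dst;
 *      uint8_t      bUnmapInfo;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint32_t), 3 /*cbAlign*/,
 *                                                          (void **)&pu32Dst, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      *pu32Dst = uErr;    // write via the direct or bounce buffered mapping
 *      return iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); // commits and updates RSP
 * @endcode
 */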
7992
7993
7994/**
7995 * Begin a special stack pop (used by iret, retf and such).
7996 *
7997 * This will raise \#SS or \#PF if appropriate.
7998 *
7999 * @returns Strict VBox status code.
8000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8001 * @param cbMem The number of bytes to pop from the stack.
8002 * @param cbAlign The alignment mask (7, 3, 1).
8003 * @param ppvMem Where to return the pointer to the stack memory.
8004 * @param pbUnmapInfo Where to store unmap info for
8005 * iemMemStackPopDoneSpecial.
8006 * @param puNewRsp Where to return the new RSP value. This must be
8007 * assigned to CPUMCTX::rsp manually some time
8008 * after iemMemStackPopDoneSpecial() has been
8009 * called.
8010 */
8011VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8012 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
8013{
8014 Assert(cbMem < UINT8_MAX);
8015 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8016 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8017}
8018
8019
8020/**
8021 * Continue a special stack pop (used by iret and retf), for the purpose of
8022 * retrieving a new stack pointer.
8023 *
8024 * This will raise \#SS or \#PF if appropriate.
8025 *
8026 * @returns Strict VBox status code.
8027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8028 * @param off Offset from the top of the stack. This is zero
8029 * except in the retf case.
8030 * @param cbMem The number of bytes to pop from the stack.
8031 * @param ppvMem Where to return the pointer to the stack memory.
8032 * @param pbUnmapInfo Where to store unmap info for
8033 * iemMemStackPopDoneSpecial.
8034 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8035 * return this because all use of this function is
8036 * to retrieve a new value and anything we return
8037 * here would be discarded.)
8038 */
8039VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8040 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
8041{
8042 Assert(cbMem < UINT8_MAX);
8043
8044 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8045 RTGCPTR GCPtrTop;
8046 if (IEM_IS_64BIT_CODE(pVCpu))
8047 GCPtrTop = uCurNewRsp;
8048 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8049 GCPtrTop = (uint32_t)uCurNewRsp;
8050 else
8051 GCPtrTop = (uint16_t)uCurNewRsp;
8052
8053 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8054 0 /* checked in iemMemStackPopBeginSpecial */);
8055}
8056
8057
8058/**
8059 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8060 * iemMemStackPopContinueSpecial).
8061 *
8062 * The caller will manually commit the rSP.
8063 *
8064 * @returns Strict VBox status code.
8065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8066 * @param bUnmapInfo Unmap information returned by
8067 * iemMemStackPopBeginSpecial() or
8068 * iemMemStackPopContinueSpecial().
8069 */
8070VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
8071{
8072 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8073}
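/** @note Minimal caller sketch for the special pop protocol (illustrative only,
 *        popping a 16-bit value from the stack):
 * @code
 *      uint16_t const *pu16Frame;
 *      uint8_t         bUnmapInfo;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint16_t), 1 /*cbAlign*/,
 *                                                         (void const **)&pu16Frame, &bUnmapInfo, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint16_t const uValue = *pu16Frame;     // consume the value before unmapping
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;   // the caller commits RSP manually
 * @endcode
 */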
8074
8075
8076/**
8077 * Fetches a system table byte.
8078 *
8079 * @returns Strict VBox status code.
8080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8081 * @param pbDst Where to return the byte.
8082 * @param iSegReg The index of the segment register to use for
8083 * this access. The base and limits are checked.
8084 * @param GCPtrMem The address of the guest memory.
8085 */
8086VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8087{
8088 /* The lazy approach for now... */
8089 uint8_t bUnmapInfo;
8090 uint8_t const *pbSrc;
8091 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8092 if (rc == VINF_SUCCESS)
8093 {
8094 *pbDst = *pbSrc;
8095 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8096 }
8097 return rc;
8098}
8099
8100
8101/**
8102 * Fetches a system table word.
8103 *
8104 * @returns Strict VBox status code.
8105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8106 * @param pu16Dst Where to return the word.
8107 * @param iSegReg The index of the segment register to use for
8108 * this access. The base and limits are checked.
8109 * @param GCPtrMem The address of the guest memory.
8110 */
8111VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8112{
8113 /* The lazy approach for now... */
8114 uint8_t bUnmapInfo;
8115 uint16_t const *pu16Src;
8116 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8117 if (rc == VINF_SUCCESS)
8118 {
8119 *pu16Dst = *pu16Src;
8120 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8121 }
8122 return rc;
8123}
8124
8125
8126/**
8127 * Fetches a system table dword.
8128 *
8129 * @returns Strict VBox status code.
8130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8131 * @param pu32Dst Where to return the dword.
8132 * @param iSegReg The index of the segment register to use for
8133 * this access. The base and limits are checked.
8134 * @param GCPtrMem The address of the guest memory.
8135 */
8136VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8137{
8138 /* The lazy approach for now... */
8139 uint8_t bUnmapInfo;
8140 uint32_t const *pu32Src;
8141 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8142 if (rc == VINF_SUCCESS)
8143 {
8144 *pu32Dst = *pu32Src;
8145 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8146 }
8147 return rc;
8148}
8149
8150
8151/**
8152 * Fetches a system table qword.
8153 *
8154 * @returns Strict VBox status code.
8155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8156 * @param pu64Dst Where to return the qword.
8157 * @param iSegReg The index of the segment register to use for
8158 * this access. The base and limits are checked.
8159 * @param GCPtrMem The address of the guest memory.
8160 */
8161VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8162{
8163 /* The lazy approach for now... */
8164 uint8_t bUnmapInfo;
8165 uint64_t const *pu64Src;
8166 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8167 if (rc == VINF_SUCCESS)
8168 {
8169 *pu64Dst = *pu64Src;
8170 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8171 }
8172 return rc;
8173}
8174
8175
8176/**
8177 * Fetches a descriptor table entry with caller specified error code.
8178 *
8179 * @returns Strict VBox status code.
8180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8181 * @param pDesc Where to return the descriptor table entry.
8182 * @param uSel The selector which table entry to fetch.
8183 * @param uXcpt The exception to raise on table lookup error.
8184 * @param uErrorCode The error code associated with the exception.
8185 */
8186static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8187 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8188{
8189 AssertPtr(pDesc);
8190 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8191
8192 /** @todo did the 286 require all 8 bytes to be accessible? */
8193 /*
8194 * Get the selector table base and check bounds.
8195 */
8196 RTGCPTR GCPtrBase;
8197 if (uSel & X86_SEL_LDT)
8198 {
8199 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8200 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8201 {
8202 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8203 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8204 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8205 uErrorCode, 0);
8206 }
8207
8208 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8209 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8210 }
8211 else
8212 {
8213 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8214 {
8215 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8216 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8217 uErrorCode, 0);
8218 }
8219 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8220 }
8221
8222 /*
8223 * Read the legacy descriptor and maybe the long mode extensions if
8224 * required.
8225 */
8226 VBOXSTRICTRC rcStrict;
8227 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8228 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8229 else
8230 {
8231 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8232 if (rcStrict == VINF_SUCCESS)
8233 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8234 if (rcStrict == VINF_SUCCESS)
8235 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8236 if (rcStrict == VINF_SUCCESS)
8237 pDesc->Legacy.au16[3] = 0;
8238 else
8239 return rcStrict;
8240 }
8241
8242 if (rcStrict == VINF_SUCCESS)
8243 {
8244 if ( !IEM_IS_LONG_MODE(pVCpu)
8245 || pDesc->Legacy.Gen.u1DescType)
8246 pDesc->Long.au64[1] = 0;
8247 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8248 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8249 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8250 else
8251 {
8252 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8253 /** @todo is this the right exception? */
8254 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8255 }
8256 }
8257 return rcStrict;
8258}
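/* Worked example (illustrative): uSel=0x002b has RPL=3 and TI=1 (LDT), index 5, so
   after checking (uSel | X86_SEL_RPL_LDT) = 0x2f against ldtr.u32Limit the 8-byte
   descriptor is read from ldtr.u64Base + (uSel & X86_SEL_MASK) = base + 0x28. */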
8259
8260
8261/**
8262 * Fetches a descriptor table entry.
8263 *
8264 * @returns Strict VBox status code.
8265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8266 * @param pDesc Where to return the descriptor table entry.
8267 * @param uSel The selector which table entry to fetch.
8268 * @param uXcpt The exception to raise on table lookup error.
8269 */
8270VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8271{
8272 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8273}
8274
8275
8276/**
8277 * Marks the selector descriptor as accessed (only non-system descriptors).
8278 *
8279 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8280 * will therefore skip the limit checks.
8281 *
8282 * @returns Strict VBox status code.
8283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8284 * @param uSel The selector.
8285 */
8286VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8287{
8288 /*
8289 * Get the selector table base and calculate the entry address.
8290 */
8291 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8292 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8293 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8294 GCPtr += uSel & X86_SEL_MASK;
8295
8296 /*
8297 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8298 * ugly stuff to avoid this. This will make sure the access is atomic, and it
8299 * more or less removes any question about 8-bit vs 32-bit accesses.
8300 */
8301 VBOXSTRICTRC rcStrict;
8302 uint8_t bUnmapInfo;
8303 uint32_t volatile *pu32;
8304 if ((GCPtr & 3) == 0)
8305 {
8306 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8307 GCPtr += 2 + 2;
8308 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8309 if (rcStrict != VINF_SUCCESS)
8310 return rcStrict;
8311 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8312 }
8313 else
8314 {
8315 /* The misaligned GDT/LDT case, map the whole thing. */
8316 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8317 if (rcStrict != VINF_SUCCESS)
8318 return rcStrict;
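 /* The accessed bit is bit 40 of the descriptor. Advance the byte pointer to the
 next dword boundary and compensate by subtracting 8 bits per byte skipped, e.g.
 for (pu32 & 3) == 1 we skip 3 bytes (24 bits) and set bit 40 - 24 = 16 instead. */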
8319 switch ((uintptr_t)pu32 & 3)
8320 {
8321 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8322 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8323 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8324 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8325 }
8326 }
8327
8328 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8329}
8330
8331
8332#undef LOG_GROUP
8333#define LOG_GROUP LOG_GROUP_IEM
8334
8335/** @} */
8336
8337/** @name Opcode Helpers.
8338 * @{
8339 */
8340
8341/**
8342 * Calculates the effective address of a ModR/M memory operand.
8343 *
8344 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8345 *
8346 * @return Strict VBox status code.
8347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8348 * @param bRm The ModRM byte.
8349 * @param cbImmAndRspOffset - First byte: The size of any immediate
8350 * following the effective address opcode bytes
8351 * (only for RIP relative addressing).
8352 * - Second byte: RSP displacement (for POP [ESP]).
8353 * @param pGCPtrEff Where to return the effective address.
8354 */
8355VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8356{
8357 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8358# define SET_SS_DEF() \
8359 do \
8360 { \
8361 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8362 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8363 } while (0)
8364
8365 if (!IEM_IS_64BIT_CODE(pVCpu))
8366 {
8367/** @todo Check the effective address size crap! */
8368 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8369 {
8370 uint16_t u16EffAddr;
8371
8372 /* Handle the disp16 form with no registers first. */
8373 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8374 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8375 else
8376 {
8377 /* Get the displacement. */
8378 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8379 {
8380 case 0: u16EffAddr = 0; break;
8381 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8382 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8383 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8384 }
8385
8386 /* Add the base and index registers to the disp. */
8387 switch (bRm & X86_MODRM_RM_MASK)
8388 {
8389 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8390 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8391 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8392 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8393 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8394 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8395 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8396 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8397 }
8398 }
8399
8400 *pGCPtrEff = u16EffAddr;
8401 }
8402 else
8403 {
8404 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8405 uint32_t u32EffAddr;
8406
8407 /* Handle the disp32 form with no registers first. */
8408 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8409 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8410 else
8411 {
8412 /* Get the register (or SIB) value. */
8413 switch ((bRm & X86_MODRM_RM_MASK))
8414 {
8415 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8416 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8417 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8418 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8419 case 4: /* SIB */
8420 {
8421 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8422
8423 /* Get the index and scale it. */
8424 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8425 {
8426 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8427 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8428 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8429 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8430 case 4: u32EffAddr = 0; /*none */ break;
8431 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8432 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8433 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8434 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8435 }
8436 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8437
8438 /* add base */
8439 switch (bSib & X86_SIB_BASE_MASK)
8440 {
8441 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8442 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8443 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8444 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8445 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8446 case 5:
8447 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8448 {
8449 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8450 SET_SS_DEF();
8451 }
8452 else
8453 {
8454 uint32_t u32Disp;
8455 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8456 u32EffAddr += u32Disp;
8457 }
8458 break;
8459 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8460 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8461 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8462 }
8463 break;
8464 }
8465 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8466 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8467 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8468 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8469 }
8470
8471 /* Get and add the displacement. */
8472 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8473 {
8474 case 0:
8475 break;
8476 case 1:
8477 {
8478 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8479 u32EffAddr += i8Disp;
8480 break;
8481 }
8482 case 2:
8483 {
8484 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8485 u32EffAddr += u32Disp;
8486 break;
8487 }
8488 default:
8489 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8490 }
8491
8492 }
8493 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8494 *pGCPtrEff = u32EffAddr;
8495 }
8496 }
8497 else
8498 {
8499 uint64_t u64EffAddr;
8500
8501 /* Handle the rip+disp32 form with no registers first. */
8502 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8503 {
8504 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8505 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8506 }
8507 else
8508 {
8509 /* Get the register (or SIB) value. */
8510 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8511 {
8512 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8513 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8514 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8515 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8516 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8517 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8518 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8519 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8520 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8521 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8522 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8523 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8524 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8525 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8526 /* SIB */
8527 case 4:
8528 case 12:
8529 {
8530 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8531
8532 /* Get the index and scale it. */
8533 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8534 {
8535 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8536 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8537 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8538 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8539 case 4: u64EffAddr = 0; /*none */ break;
8540 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8541 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8542 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8543 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8544 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8545 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8546 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8547 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8548 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8549 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8550 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8551 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8552 }
8553 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8554
8555 /* add base */
8556 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8557 {
8558 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8559 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8560 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8561 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8562 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8563 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8564 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8565 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8566 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8567 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8568 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8569 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8570 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8571 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8572 /* complicated encodings */
8573 case 5:
8574 case 13:
8575 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8576 {
8577 if (!pVCpu->iem.s.uRexB)
8578 {
8579 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8580 SET_SS_DEF();
8581 }
8582 else
8583 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8584 }
8585 else
8586 {
8587 uint32_t u32Disp;
8588 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8589 u64EffAddr += (int32_t)u32Disp;
8590 }
8591 break;
8592 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8593 }
8594 break;
8595 }
8596 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8597 }
8598
8599 /* Get and add the displacement. */
8600 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8601 {
8602 case 0:
8603 break;
8604 case 1:
8605 {
8606 int8_t i8Disp;
8607 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8608 u64EffAddr += i8Disp;
8609 break;
8610 }
8611 case 2:
8612 {
8613 uint32_t u32Disp;
8614 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8615 u64EffAddr += (int32_t)u32Disp;
8616 break;
8617 }
8618 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8619 }
8620
8621 }
8622
8623 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8624 *pGCPtrEff = u64EffAddr;
8625 else
8626 {
8627 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8628 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8629 }
8630 }
8631
8632 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8633 return VINF_SUCCESS;
8634}
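/** @note Worked example (illustrative, 32-bit addressing): bRm=0x44 gives mod=1 and
 *        rm=4 (SIB byte follows, disp8); with bSib=0x88 (scale=2, index=ecx, base=eax)
 *        and disp8=0x10 the code above computes:
 * @code
 *      *pGCPtrEff = pVCpu->cpum.GstCtx.eax + (pVCpu->cpum.GstCtx.ecx << 2) + 0x10;
 * @endcode
 */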
8635
8636
8637#ifdef IEM_WITH_SETJMP
8638/**
8639 * Calculates the effective address of a ModR/M memory operand.
8640 *
8641 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8642 *
8643 * May longjmp on internal error.
8644 *
8645 * @return The effective address.
8646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8647 * @param bRm The ModRM byte.
8648 * @param cbImmAndRspOffset - First byte: The size of any immediate
8649 * following the effective address opcode bytes
8650 * (only for RIP relative addressing).
8651 * - Second byte: RSP displacement (for POP [ESP]).
8652 */
8653RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8654{
8655 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8656# define SET_SS_DEF() \
8657 do \
8658 { \
8659 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8660 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8661 } while (0)
8662
8663 if (!IEM_IS_64BIT_CODE(pVCpu))
8664 {
8665/** @todo Check the effective address size crap! */
8666 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8667 {
8668 uint16_t u16EffAddr;
8669
8670 /* Handle the disp16 form with no registers first. */
8671 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8672 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8673 else
8674 {
8675 /* Get the displacement. */
8676 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8677 {
8678 case 0: u16EffAddr = 0; break;
8679 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8680 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8681 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8682 }
8683
8684 /* Add the base and index registers to the disp. */
8685 switch (bRm & X86_MODRM_RM_MASK)
8686 {
8687 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8688 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8689 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8690 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8691 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8692 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8693 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8694 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8695 }
8696 }
8697
8698 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8699 return u16EffAddr;
8700 }
8701
8702 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8703 uint32_t u32EffAddr;
8704
8705 /* Handle the disp32 form with no registers first. */
8706 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8707 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8708 else
8709 {
8710 /* Get the register (or SIB) value. */
8711 switch ((bRm & X86_MODRM_RM_MASK))
8712 {
8713 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8714 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8715 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8716 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8717 case 4: /* SIB */
8718 {
8719 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8720
8721 /* Get the index and scale it. */
8722 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8723 {
8724 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8725 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8726 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8727 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8728 case 4: u32EffAddr = 0; /*none */ break;
8729 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8730 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8731 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8732 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8733 }
8734 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8735
8736 /* add base */
8737 switch (bSib & X86_SIB_BASE_MASK)
8738 {
8739 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8740 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8741 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8742 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8743 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8744 case 5:
8745 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8746 {
8747 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8748 SET_SS_DEF();
8749 }
8750 else
8751 {
8752 uint32_t u32Disp;
8753 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8754 u32EffAddr += u32Disp;
8755 }
8756 break;
8757 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8758 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8759 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8760 }
8761 break;
8762 }
8763 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8764 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8765 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8766 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8767 }
8768
8769 /* Get and add the displacement. */
8770 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8771 {
8772 case 0:
8773 break;
8774 case 1:
8775 {
8776 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8777 u32EffAddr += i8Disp;
8778 break;
8779 }
8780 case 2:
8781 {
8782 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8783 u32EffAddr += u32Disp;
8784 break;
8785 }
8786 default:
8787 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8788 }
8789 }
8790
8791 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8792 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8793 return u32EffAddr;
8794 }
8795
8796 uint64_t u64EffAddr;
8797
8798 /* Handle the rip+disp32 form with no registers first. */
8799 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8800 {
8801 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8802 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8803 }
8804 else
8805 {
8806 /* Get the register (or SIB) value. */
8807 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8808 {
8809 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8810 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8811 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8812 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8813 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8814 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8815 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8816 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8817 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8818 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8819 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8820 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8821 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8822 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8823 /* SIB */
8824 case 4:
8825 case 12:
8826 {
8827 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8828
8829 /* Get the index and scale it. */
8830 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8831 {
8832 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8833 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8834 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8835 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8836 case 4: u64EffAddr = 0; /*none */ break;
8837 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8838 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8839 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8840 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8841 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8842 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8843 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8844 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8845 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8846 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8847 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8848 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8849 }
8850 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8851
8852 /* add base */
8853 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8854 {
8855 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8856 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8857 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8858 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8859 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8860 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8861 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8862 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8863 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8864 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8865 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8866 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8867 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8868 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8869 /* complicated encodings */
8870 case 5:
8871 case 13:
8872 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8873 {
8874 if (!pVCpu->iem.s.uRexB)
8875 {
8876 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8877 SET_SS_DEF();
8878 }
8879 else
8880 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8881 }
8882 else
8883 {
8884 uint32_t u32Disp;
8885 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8886 u64EffAddr += (int32_t)u32Disp;
8887 }
8888 break;
8889 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8890 }
8891 break;
8892 }
8893 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8894 }
8895
8896 /* Get and add the displacement. */
8897 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8898 {
8899 case 0:
8900 break;
8901 case 1:
8902 {
8903 int8_t i8Disp;
8904 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8905 u64EffAddr += i8Disp;
8906 break;
8907 }
8908 case 2:
8909 {
8910 uint32_t u32Disp;
8911 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8912 u64EffAddr += (int32_t)u32Disp;
8913 break;
8914 }
8915 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8916 }
8917
8918 }
8919
8920 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8921 {
8922 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8923 return u64EffAddr;
8924 }
8925 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8926 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8927 return u64EffAddr & UINT32_MAX;
8928}
8929#endif /* IEM_WITH_SETJMP */
8930
8931
8932/**
8933 * Calculates the effective address of a ModR/M memory operand, extended version
8934 * for use in the recompilers.
8935 *
8936 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8937 *
8938 * @return Strict VBox status code.
8939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8940 * @param bRm The ModRM byte.
8941 * @param cbImmAndRspOffset - First byte: The size of any immediate
8942 * following the effective address opcode bytes
8943 * (only for RIP relative addressing).
8944 * - Second byte: RSP displacement (for POP [ESP]).
8945 * @param pGCPtrEff Where to return the effective address.
8946 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8947 * SIB byte (bits 39:32).
8948 */
8949VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8950{
8951     Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8952# define SET_SS_DEF() \
8953 do \
8954 { \
8955 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8956 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8957 } while (0)
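    /* Note: SET_SS_DEF only changes the *default* segment to SS for the BP/SP
       based forms below; an explicit segment override prefix always wins. */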
8958
8959 uint64_t uInfo;
8960 if (!IEM_IS_64BIT_CODE(pVCpu))
8961 {
8962/** @todo Check the effective address size crap! */
8963 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8964 {
8965 uint16_t u16EffAddr;
8966
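            /* 16-bit ModR/M addressing: mod=00 with r/m=110 encodes a bare disp16;
               otherwise r/m selects BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP or BX,
               to which a disp8 (mod=01) or disp16 (mod=10) may be added. */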
8967 /* Handle the disp16 form with no registers first. */
8968 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8969 {
8970 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8971 uInfo = u16EffAddr;
8972 }
8973 else
8974 {
8975                     /* Get the displacement. */
8976 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8977 {
8978 case 0: u16EffAddr = 0; break;
8979 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8980 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8981 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8982 }
8983 uInfo = u16EffAddr;
8984
8985 /* Add the base and index registers to the disp. */
8986 switch (bRm & X86_MODRM_RM_MASK)
8987 {
8988 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8989 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8990 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8991 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8992 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8993 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8994 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8995 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8996 }
8997 }
8998
8999 *pGCPtrEff = u16EffAddr;
9000 }
9001 else
9002 {
9003 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9004 uint32_t u32EffAddr;
9005
9006 /* Handle the disp32 form with no registers first. */
9007 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9008 {
9009 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9010 uInfo = u32EffAddr;
9011 }
9012 else
9013 {
9014 /* Get the register (or SIB) value. */
9015 uInfo = 0;
9016 switch ((bRm & X86_MODRM_RM_MASK))
9017 {
9018 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9019 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9020 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9021 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9022 case 4: /* SIB */
9023 {
9024 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
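                    /* Stash the raw SIB byte in bits 39:32 of uInfo; any displacement
                       fetched below is merged into bits 31:0. */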
9025 uInfo = (uint64_t)bSib << 32;
9026
9027 /* Get the index and scale it. */
9028 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9029 {
9030 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9031 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9032 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9033 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9034 case 4: u32EffAddr = 0; /*none */ break;
9035 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9036 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9037 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9038 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9039 }
9040 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9041
9042 /* add base */
9043 switch (bSib & X86_SIB_BASE_MASK)
9044 {
9045 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9046 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9047 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9048 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9049 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9050 case 5:
9051 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9052 {
9053 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9054 SET_SS_DEF();
9055 }
9056 else
9057 {
9058 uint32_t u32Disp;
9059 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9060 u32EffAddr += u32Disp;
9061 uInfo |= u32Disp;
9062 }
9063 break;
9064 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9065 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9066 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9067 }
9068 break;
9069 }
9070 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9071 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9072 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9073 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9074 }
9075
9076 /* Get and add the displacement. */
9077 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9078 {
9079 case 0:
9080 break;
9081 case 1:
9082 {
9083 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9084 u32EffAddr += i8Disp;
9085 uInfo |= (uint32_t)(int32_t)i8Disp;
9086 break;
9087 }
9088 case 2:
9089 {
9090 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9091 u32EffAddr += u32Disp;
9092 uInfo |= (uint32_t)u32Disp;
9093 break;
9094 }
9095 default:
9096 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9097 }
9098
9099 }
9100 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9101 *pGCPtrEff = u32EffAddr;
9102 }
9103 }
9104 else
9105 {
9106 uint64_t u64EffAddr;
9107
9108 /* Handle the rip+disp32 form with no registers first. */
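            /* Note: RIP-relative addresses are relative to the end of the instruction,
               so the length decoded so far plus the size of any immediate that still
               follows (low byte of cbImmAndRspOffset) is added to the displacement. */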
9109 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9110 {
9111 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9112 uInfo = (uint32_t)u64EffAddr;
9113 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9114 }
9115 else
9116 {
9117 /* Get the register (or SIB) value. */
9118 uInfo = 0;
9119 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9120 {
9121 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9122 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9123 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9124 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9125 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9126 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9127 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9128 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9129 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9130 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9131 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9132 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9133 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9134 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9135 /* SIB */
9136 case 4:
9137 case 12:
9138 {
9139 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9140 uInfo = (uint64_t)bSib << 32;
9141
9142 /* Get the index and scale it. */
9143 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9144 {
9145 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9146 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9147 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9148 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9149 case 4: u64EffAddr = 0; /*none */ break;
9150 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9151 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9152 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9153 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9154 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9155 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9156 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9157 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9158 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9159 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9160 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9161 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9162 }
9163 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9164
9165 /* add base */
9166 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9167 {
9168 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9169 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9170 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9171 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9172 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9173 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9174 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9175 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9176 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9177 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9178 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9179 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9180 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9181 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9182 /* complicated encodings */
9183 case 5:
9184 case 13:
9185 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9186 {
9187 if (!pVCpu->iem.s.uRexB)
9188 {
9189 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9190 SET_SS_DEF();
9191 }
9192 else
9193 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9194 }
9195 else
9196 {
9197 uint32_t u32Disp;
9198 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9199 u64EffAddr += (int32_t)u32Disp;
9200 uInfo |= u32Disp;
9201 }
9202 break;
9203 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9204 }
9205 break;
9206 }
9207 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9208 }
9209
9210 /* Get and add the displacement. */
9211 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9212 {
9213 case 0:
9214 break;
9215 case 1:
9216 {
9217 int8_t i8Disp;
9218 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9219 u64EffAddr += i8Disp;
9220 uInfo |= (uint32_t)(int32_t)i8Disp;
9221 break;
9222 }
9223 case 2:
9224 {
9225 uint32_t u32Disp;
9226 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9227 u64EffAddr += (int32_t)u32Disp;
9228 uInfo |= u32Disp;
9229 break;
9230 }
9231 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9232 }
9233
9234 }
9235
9236 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9237 *pGCPtrEff = u64EffAddr;
9238 else
9239 {
9240 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9241 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9242 }
9243 }
9244 *puInfo = uInfo;
9245
9246 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9247 return VINF_SUCCESS;
9248}
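
/*
 * Rough usage sketch for iemOpHlpCalcRmEffAddrEx (illustrative only, not lifted
 * from an actual caller; bRm and cbImmAndRspOffset stand for values the caller
 * already has at hand when it expands IEM_MC_CALC_RM_EFF_ADDR):
 *
 *     RTGCPTR      GCPtrEff;
 *     uint64_t     uInfo;
 *     VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint32_t const u32Disp = (uint32_t)uInfo;        // displacement, bits 31:0
 *         uint8_t  const bSib    = (uint8_t)(uInfo >> 32); // SIB byte, bits 39:32 (0 if none)
 *         // ... use GCPtrEff for the memory access ...
 *     }
 */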
9249
9250/** @} */
9251
9252
9253#ifdef LOG_ENABLED
9254/**
9255 * Logs the current instruction.
9256 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9257 * @param fSameCtx Set if we have the same context information as the VMM,
9258 * clear if we may have already executed an instruction in
9259 * our debug context. When clear, we assume IEMCPU holds
9260 * valid CPU mode info.
9261 *
9262 * The @a fSameCtx parameter is now misleading and obsolete.
9263 * @param pszFunction The IEM function doing the execution.
9264 */
9265static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9266{
9267# ifdef IN_RING3
9268 if (LogIs2Enabled())
9269 {
9270 char szInstr[256];
9271 uint32_t cbInstr = 0;
9272 if (fSameCtx)
9273 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9274 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9275 szInstr, sizeof(szInstr), &cbInstr);
9276 else
9277 {
9278 uint32_t fFlags = 0;
9279 switch (IEM_GET_CPU_MODE(pVCpu))
9280 {
9281 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9282 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9283 case IEMMODE_16BIT:
9284 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9285 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9286 else
9287 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9288 break;
9289 }
9290 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9291 szInstr, sizeof(szInstr), &cbInstr);
9292 }
9293
9294 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9295 Log2(("**** %s fExec=%x\n"
9296 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9297 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9298 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9299 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9300 " %s\n"
9301 , pszFunction, pVCpu->iem.s.fExec,
9302 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9303 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9304 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9305 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9306 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9307 szInstr));
9308
9309 /* This stuff sucks atm. as it fills the log with MSRs. */
9310 //if (LogIs3Enabled())
9311 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9312 }
9313 else
9314# endif
9315 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9316 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9317 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9318}
9319#endif /* LOG_ENABLED */
9320
9321
9322#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9323/**
9324 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9325 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9326 *
9327 * @returns Modified rcStrict.
9328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9329 * @param rcStrict The instruction execution status.
9330 */
9331static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9332{
9333 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9334 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9335 {
9336 /* VMX preemption timer takes priority over NMI-window exits. */
9337 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9338 {
9339 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9340 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9341 }
9342 /*
9343 * Check remaining intercepts.
9344 *
9345 * NMI-window and Interrupt-window VM-exits.
9346 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9347 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9348 *
9349 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9350 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9351 */
9352 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9353 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9354 && !TRPMHasTrap(pVCpu))
9355 {
9356 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9357 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9358 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9359 {
9360 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9361 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9362 }
9363 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9364 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9365 {
9366 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9367 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9368 }
9369 }
9370 }
9371 /* TPR-below threshold/APIC write has the highest priority. */
9372 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9373 {
9374 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9375 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9376 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9377 }
9378 /* MTF takes priority over VMX-preemption timer. */
9379 else
9380 {
9381 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9382 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9383 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9384 }
9385 return rcStrict;
9386}
9387#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9388
9389
9390/**
9391 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9392 * IEMExecOneWithPrefetchedByPC.
9393 *
9394 * Similar code is found in IEMExecLots.
9395 *
9396 * @return Strict VBox status code.
9397 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9398 * @param fExecuteInhibit If set, execute the instruction following CLI,
9399 * POP SS and MOV SS,GR.
9400 * @param pszFunction The calling function name.
9401 */
9402DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9403{
9404 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9405 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9406 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9407 RT_NOREF_PV(pszFunction);
9408
9409#ifdef IEM_WITH_SETJMP
9410 VBOXSTRICTRC rcStrict;
9411 IEM_TRY_SETJMP(pVCpu, rcStrict)
9412 {
9413 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9414 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9415 }
9416 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9417 {
9418 pVCpu->iem.s.cLongJumps++;
9419 }
9420 IEM_CATCH_LONGJMP_END(pVCpu);
9421#else
9422 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9423 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9424#endif
9425 if (rcStrict == VINF_SUCCESS)
9426 pVCpu->iem.s.cInstructions++;
9427 if (pVCpu->iem.s.cActiveMappings > 0)
9428 {
9429 Assert(rcStrict != VINF_SUCCESS);
9430 iemMemRollback(pVCpu);
9431 }
9432 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9433 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9434 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9435
9436//#ifdef DEBUG
9437// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9438//#endif
9439
9440#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9441 /*
9442 * Perform any VMX nested-guest instruction boundary actions.
9443 *
9444 * If any of these causes a VM-exit, we must skip executing the next
9445 * instruction (would run into stale page tables). A VM-exit makes sure
9446 * there is no interrupt-inhibition, so that should ensure we don't go
9447 * to try execute the next instruction. Clearing fExecuteInhibit is
9448 * problematic because of the setjmp/longjmp clobbering above.
9449 */
9450 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9451 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9452 || rcStrict != VINF_SUCCESS)
9453 { /* likely */ }
9454 else
9455 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9456#endif
9457
9458 /* Execute the next instruction as well if a cli, pop ss or
9459 mov ss, Gr has just completed successfully. */
9460 if ( fExecuteInhibit
9461 && rcStrict == VINF_SUCCESS
9462 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9463 {
9464 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9465 if (rcStrict == VINF_SUCCESS)
9466 {
9467#ifdef LOG_ENABLED
9468 iemLogCurInstr(pVCpu, false, pszFunction);
9469#endif
9470#ifdef IEM_WITH_SETJMP
9471 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9472 {
9473 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9474 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9475 }
9476 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9477 {
9478 pVCpu->iem.s.cLongJumps++;
9479 }
9480 IEM_CATCH_LONGJMP_END(pVCpu);
9481#else
9482 IEM_OPCODE_GET_FIRST_U8(&b);
9483 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9484#endif
9485 if (rcStrict == VINF_SUCCESS)
9486 {
9487 pVCpu->iem.s.cInstructions++;
9488#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9489 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9490 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9491 { /* likely */ }
9492 else
9493 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9494#endif
9495 }
9496 if (pVCpu->iem.s.cActiveMappings > 0)
9497 {
9498 Assert(rcStrict != VINF_SUCCESS);
9499 iemMemRollback(pVCpu);
9500 }
9501 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9502 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9503 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9504 }
9505 else if (pVCpu->iem.s.cActiveMappings > 0)
9506 iemMemRollback(pVCpu);
9507 /** @todo drop this after we bake this change into RIP advancing. */
9508 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9509 }
9510
9511 /*
9512 * Return value fiddling, statistics and sanity assertions.
9513 */
9514 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9515
9516 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9517 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9518 return rcStrict;
9519}
9520
9521
9522/**
9523 * Execute one instruction.
9524 *
9525 * @return Strict VBox status code.
9526 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9527 */
9528VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9529{
9530     AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9531#ifdef LOG_ENABLED
9532 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9533#endif
9534
9535 /*
9536 * Do the decoding and emulation.
9537 */
9538 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9539 if (rcStrict == VINF_SUCCESS)
9540 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9541 else if (pVCpu->iem.s.cActiveMappings > 0)
9542 iemMemRollback(pVCpu);
9543
9544 if (rcStrict != VINF_SUCCESS)
9545 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9546 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9547 return rcStrict;
9548}
9549
9550
9551VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9552{
9553 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9554 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9555 if (rcStrict == VINF_SUCCESS)
9556 {
9557 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9558 if (pcbWritten)
9559 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9560 }
9561 else if (pVCpu->iem.s.cActiveMappings > 0)
9562 iemMemRollback(pVCpu);
9563
9564 return rcStrict;
9565}
9566
9567
9568VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9569 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9570{
9571 VBOXSTRICTRC rcStrict;
9572 if ( cbOpcodeBytes
9573 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9574 {
9575 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9576#ifdef IEM_WITH_CODE_TLB
9577 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9578 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9579 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9580 pVCpu->iem.s.offCurInstrStart = 0;
9581 pVCpu->iem.s.offInstrNextByte = 0;
9582 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9583#else
9584 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9585 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9586#endif
9587 rcStrict = VINF_SUCCESS;
9588 }
9589 else
9590 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9591 if (rcStrict == VINF_SUCCESS)
9592 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9593 else if (pVCpu->iem.s.cActiveMappings > 0)
9594 iemMemRollback(pVCpu);
9595
9596 return rcStrict;
9597}
9598
9599
9600VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9601{
9602 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9603 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9604 if (rcStrict == VINF_SUCCESS)
9605 {
9606 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9607 if (pcbWritten)
9608 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9609 }
9610 else if (pVCpu->iem.s.cActiveMappings > 0)
9611 iemMemRollback(pVCpu);
9612
9613 return rcStrict;
9614}
9615
9616
9617VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9618 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9619{
9620 VBOXSTRICTRC rcStrict;
9621 if ( cbOpcodeBytes
9622 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9623 {
9624 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9625#ifdef IEM_WITH_CODE_TLB
9626 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9627 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9628 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9629 pVCpu->iem.s.offCurInstrStart = 0;
9630 pVCpu->iem.s.offInstrNextByte = 0;
9631 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9632#else
9633 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9634 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9635#endif
9636 rcStrict = VINF_SUCCESS;
9637 }
9638 else
9639 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9640 if (rcStrict == VINF_SUCCESS)
9641 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9642 else if (pVCpu->iem.s.cActiveMappings > 0)
9643 iemMemRollback(pVCpu);
9644
9645 return rcStrict;
9646}
9647
9648
9649/**
9650 * For handling split cacheline lock operations when the host has split-lock
9651 * detection enabled.
9652 *
9653 * This will cause the interpreter to disregard the lock prefix and implicit
9654 * locking (xchg).
9655 *
9656 * @returns Strict VBox status code.
9657 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9658 */
9659VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9660{
9661 /*
9662 * Do the decoding and emulation.
9663 */
9664 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9665 if (rcStrict == VINF_SUCCESS)
9666 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9667 else if (pVCpu->iem.s.cActiveMappings > 0)
9668 iemMemRollback(pVCpu);
9669
9670 if (rcStrict != VINF_SUCCESS)
9671 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9672 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9673 return rcStrict;
9674}
9675
9676
9677/**
9678 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9679 * inject a pending TRPM trap.
9680 */
9681VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9682{
9683 Assert(TRPMHasTrap(pVCpu));
9684
9685 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9686 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9687 {
9688 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9689#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9690 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9691 if (fIntrEnabled)
9692 {
9693 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9694 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9695 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9696 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9697 else
9698 {
9699 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9700 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9701 }
9702 }
9703#else
9704 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9705#endif
9706 if (fIntrEnabled)
9707 {
9708 uint8_t u8TrapNo;
9709 TRPMEVENT enmType;
9710 uint32_t uErrCode;
9711 RTGCPTR uCr2;
9712 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9713 AssertRC(rc2);
9714 Assert(enmType == TRPM_HARDWARE_INT);
9715 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9716
9717 TRPMResetTrap(pVCpu);
9718
9719#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9720 /* Injecting an event may cause a VM-exit. */
9721 if ( rcStrict != VINF_SUCCESS
9722 && rcStrict != VINF_IEM_RAISED_XCPT)
9723 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9724#else
9725 NOREF(rcStrict);
9726#endif
9727 }
9728 }
9729
9730 return VINF_SUCCESS;
9731}
9732
9733
9734VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9735{
9736 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9737 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9738 Assert(cMaxInstructions > 0);
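    /* cPollRate must be a power of two minus one: it is used as a mask below so
       that timers get polled roughly once every cPollRate + 1 instructions. */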
9739
9740 /*
9741 * See if there is an interrupt pending in TRPM, inject it if we can.
9742 */
9743 /** @todo What if we are injecting an exception and not an interrupt? Is that
9744 * possible here? For now we assert it is indeed only an interrupt. */
9745 if (!TRPMHasTrap(pVCpu))
9746 { /* likely */ }
9747 else
9748 {
9749 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9750 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9751 { /*likely */ }
9752 else
9753 return rcStrict;
9754 }
9755
9756 /*
9757 * Initial decoder init w/ prefetch, then setup setjmp.
9758 */
9759 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9760 if (rcStrict == VINF_SUCCESS)
9761 {
9762#ifdef IEM_WITH_SETJMP
9763 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9764 IEM_TRY_SETJMP(pVCpu, rcStrict)
9765#endif
9766 {
9767 /*
9768              * The run loop.  The instruction budget is supplied by the caller via cMaxInstructions.
9769 */
9770 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9771 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9772 for (;;)
9773 {
9774 /*
9775 * Log the state.
9776 */
9777#ifdef LOG_ENABLED
9778 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9779#endif
9780
9781 /*
9782 * Do the decoding and emulation.
9783 */
9784 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9785 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9786#ifdef VBOX_STRICT
9787 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9788#endif
9789 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9790 {
9791 Assert(pVCpu->iem.s.cActiveMappings == 0);
9792 pVCpu->iem.s.cInstructions++;
9793
9794#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9795 /* Perform any VMX nested-guest instruction boundary actions. */
9796 uint64_t fCpu = pVCpu->fLocalForcedActions;
9797 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9798 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9799 { /* likely */ }
9800 else
9801 {
9802 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9803 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9804 fCpu = pVCpu->fLocalForcedActions;
9805 else
9806 {
9807 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9808 break;
9809 }
9810 }
9811#endif
9812 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9813 {
9814#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9815 uint64_t fCpu = pVCpu->fLocalForcedActions;
9816#endif
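                    /* Strip the forced-action flags that do not require leaving the
                       inner loop; pending PIC/APIC interrupts are also tolerated as
                       long as EFLAGS.IF is clear. */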
9817 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9818 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9819 | VMCPU_FF_TLB_FLUSH
9820 | VMCPU_FF_UNHALT );
9821
9822 if (RT_LIKELY( ( !fCpu
9823 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9824 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9825 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9826 {
9827 if (--cMaxInstructionsGccStupidity > 0)
9828 {
9829                             /* Poll timers every now and then according to the caller's specs. */
9830 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9831 || !TMTimerPollBool(pVM, pVCpu))
9832 {
9833 Assert(pVCpu->iem.s.cActiveMappings == 0);
9834 iemReInitDecoder(pVCpu);
9835 continue;
9836 }
9837 }
9838 }
9839 }
9840 Assert(pVCpu->iem.s.cActiveMappings == 0);
9841 }
9842 else if (pVCpu->iem.s.cActiveMappings > 0)
9843 iemMemRollback(pVCpu);
9844 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9845 break;
9846 }
9847 }
9848#ifdef IEM_WITH_SETJMP
9849 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9850 {
9851 if (pVCpu->iem.s.cActiveMappings > 0)
9852 iemMemRollback(pVCpu);
9853# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9854 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9855# endif
9856 pVCpu->iem.s.cLongJumps++;
9857 }
9858 IEM_CATCH_LONGJMP_END(pVCpu);
9859#endif
9860
9861 /*
9862 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9863 */
9864 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9865 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9866 }
9867 else
9868 {
9869 if (pVCpu->iem.s.cActiveMappings > 0)
9870 iemMemRollback(pVCpu);
9871
9872#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9873 /*
9874 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9875 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9876 */
9877 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9878#endif
9879 }
9880
9881 /*
9882 * Maybe re-enter raw-mode and log.
9883 */
9884 if (rcStrict != VINF_SUCCESS)
9885 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9886 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9887 if (pcInstructions)
9888 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9889 return rcStrict;
9890}
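
/*
 * Rough invocation sketch (the numbers are illustrative, not what EM actually
 * passes): run up to 4096 instructions, polling timers about every 512
 * instructions (cPollRate must be a power of two minus one):
 *
 *     uint32_t     cInstructions = 0;
 *     VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/,
 *                                         511 /*cPollRate*/, &cInstructions);
 */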
9891
9892
9893/**
9894 * Interface used by EMExecuteExec, does exit statistics and limits.
9895 *
9896 * @returns Strict VBox status code.
9897 * @param pVCpu The cross context virtual CPU structure.
9898 * @param fWillExit To be defined.
9899 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9900 * @param cMaxInstructions Maximum number of instructions to execute.
9901 * @param cMaxInstructionsWithoutExits
9902 * The max number of instructions without exits.
9903 * @param pStats Where to return statistics.
9904 */
9905VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9906 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9907{
9908 NOREF(fWillExit); /** @todo define flexible exit crits */
9909
9910 /*
9911 * Initialize return stats.
9912 */
9913 pStats->cInstructions = 0;
9914 pStats->cExits = 0;
9915 pStats->cMaxExitDistance = 0;
9916 pStats->cReserved = 0;
9917
9918 /*
9919 * Initial decoder init w/ prefetch, then setup setjmp.
9920 */
9921 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9922 if (rcStrict == VINF_SUCCESS)
9923 {
9924#ifdef IEM_WITH_SETJMP
9925 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9926 IEM_TRY_SETJMP(pVCpu, rcStrict)
9927#endif
9928 {
9929#ifdef IN_RING0
9930 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9931#endif
9932 uint32_t cInstructionSinceLastExit = 0;
9933
9934 /*
9935              * The run loop.  The caller bounds it via cMaxInstructions and cMaxInstructionsWithoutExits.
9936 */
9937 PVM pVM = pVCpu->CTX_SUFF(pVM);
9938 for (;;)
9939 {
9940 /*
9941 * Log the state.
9942 */
9943#ifdef LOG_ENABLED
9944 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9945#endif
9946
9947 /*
9948 * Do the decoding and emulation.
9949 */
9950 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
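                /* cPotentialExits is sampled before executing the instruction; if it
                   changed afterwards, the instruction did something that would normally
                   cause an exit and the exit statistics below are updated. */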
9951
9952 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9953 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9954
9955 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9956 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9957 {
9958 pStats->cExits += 1;
9959 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9960 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9961 cInstructionSinceLastExit = 0;
9962 }
9963
9964 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9965 {
9966 Assert(pVCpu->iem.s.cActiveMappings == 0);
9967 pVCpu->iem.s.cInstructions++;
9968 pStats->cInstructions++;
9969 cInstructionSinceLastExit++;
9970
9971#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9972 /* Perform any VMX nested-guest instruction boundary actions. */
9973 uint64_t fCpu = pVCpu->fLocalForcedActions;
9974 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9975 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9976 { /* likely */ }
9977 else
9978 {
9979 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9980 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9981 fCpu = pVCpu->fLocalForcedActions;
9982 else
9983 {
9984 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9985 break;
9986 }
9987 }
9988#endif
9989 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9990 {
9991#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9992 uint64_t fCpu = pVCpu->fLocalForcedActions;
9993#endif
9994 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9995 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9996 | VMCPU_FF_TLB_FLUSH
9997 | VMCPU_FF_UNHALT );
9998 if (RT_LIKELY( ( ( !fCpu
9999 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10000 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10001 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10002 || pStats->cInstructions < cMinInstructions))
10003 {
10004 if (pStats->cInstructions < cMaxInstructions)
10005 {
10006 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10007 {
10008#ifdef IN_RING0
10009 if ( !fCheckPreemptionPending
10010 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10011#endif
10012 {
10013 Assert(pVCpu->iem.s.cActiveMappings == 0);
10014 iemReInitDecoder(pVCpu);
10015 continue;
10016 }
10017#ifdef IN_RING0
10018 rcStrict = VINF_EM_RAW_INTERRUPT;
10019 break;
10020#endif
10021 }
10022 }
10023 }
10024 Assert(!(fCpu & VMCPU_FF_IEM));
10025 }
10026 Assert(pVCpu->iem.s.cActiveMappings == 0);
10027 }
10028 else if (pVCpu->iem.s.cActiveMappings > 0)
10029 iemMemRollback(pVCpu);
10030 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10031 break;
10032 }
10033 }
10034#ifdef IEM_WITH_SETJMP
10035 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10036 {
10037 if (pVCpu->iem.s.cActiveMappings > 0)
10038 iemMemRollback(pVCpu);
10039 pVCpu->iem.s.cLongJumps++;
10040 }
10041 IEM_CATCH_LONGJMP_END(pVCpu);
10042#endif
10043
10044 /*
10045 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10046 */
10047 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10048 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10049 }
10050 else
10051 {
10052 if (pVCpu->iem.s.cActiveMappings > 0)
10053 iemMemRollback(pVCpu);
10054
10055#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10056 /*
10057 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10058 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10059 */
10060 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10061#endif
10062 }
10063
10064 /*
10065 * Maybe re-enter raw-mode and log.
10066 */
10067 if (rcStrict != VINF_SUCCESS)
10068 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10069 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10070 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10071 return rcStrict;
10072}
10073
10074
10075/**
10076 * Injects a trap, fault, abort, software interrupt or external interrupt.
10077 *
10078 * The parameter list matches TRPMQueryTrapAll pretty closely.
10079 *
10080 * @returns Strict VBox status code.
10081 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10082 * @param u8TrapNo The trap number.
10083 * @param enmType What type is it (trap/fault/abort), software
10084 * interrupt or hardware interrupt.
10085 * @param uErrCode The error code if applicable.
10086 * @param uCr2 The CR2 value if applicable.
10087 * @param cbInstr The instruction length (only relevant for
10088 * software interrupts).
10089 */
10090VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10091 uint8_t cbInstr)
10092{
10093 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10094#ifdef DBGFTRACE_ENABLED
10095 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10096 u8TrapNo, enmType, uErrCode, uCr2);
10097#endif
10098
10099 uint32_t fFlags;
10100 switch (enmType)
10101 {
10102 case TRPM_HARDWARE_INT:
10103 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10104 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10105 uErrCode = uCr2 = 0;
10106 break;
10107
10108 case TRPM_SOFTWARE_INT:
10109 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10110 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10111 uErrCode = uCr2 = 0;
10112 break;
10113
10114 case TRPM_TRAP:
10115 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
10116 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10117 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10118 if (u8TrapNo == X86_XCPT_PF)
10119 fFlags |= IEM_XCPT_FLAGS_CR2;
10120 switch (u8TrapNo)
10121 {
10122 case X86_XCPT_DF:
10123 case X86_XCPT_TS:
10124 case X86_XCPT_NP:
10125 case X86_XCPT_SS:
10126 case X86_XCPT_PF:
10127 case X86_XCPT_AC:
10128 case X86_XCPT_GP:
10129 fFlags |= IEM_XCPT_FLAGS_ERR;
10130 break;
10131 }
10132 break;
10133
10134 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10135 }
10136
10137 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10138
10139 if (pVCpu->iem.s.cActiveMappings > 0)
10140 iemMemRollback(pVCpu);
10141
10142 return rcStrict;
10143}
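
/*
 * Illustrative only (vector 0x41 is made up): injecting an external interrupt
 * ignores the error code and CR2, and cbInstr only matters for software
 * interrupts, so zeros are fine here:
 *
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x41, TRPM_HARDWARE_INT,
 *                                           0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
 */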
10144
10145
10146/**
10147 * Injects the active TRPM event.
10148 *
10149 * @returns Strict VBox status code.
10150 * @param pVCpu The cross context virtual CPU structure.
10151 */
10152VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10153{
10154#ifndef IEM_IMPLEMENTS_TASKSWITCH
10155 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10156#else
10157 uint8_t u8TrapNo;
10158 TRPMEVENT enmType;
10159 uint32_t uErrCode;
10160 RTGCUINTPTR uCr2;
10161 uint8_t cbInstr;
10162 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10163 if (RT_FAILURE(rc))
10164 return rc;
10165
10166 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10167 * ICEBP \#DB injection as a special case. */
10168 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10169#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10170 if (rcStrict == VINF_SVM_VMEXIT)
10171 rcStrict = VINF_SUCCESS;
10172#endif
10173#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10174 if (rcStrict == VINF_VMX_VMEXIT)
10175 rcStrict = VINF_SUCCESS;
10176#endif
10177 /** @todo Are there any other codes that imply the event was successfully
10178 * delivered to the guest? See @bugref{6607}. */
10179 if ( rcStrict == VINF_SUCCESS
10180 || rcStrict == VINF_IEM_RAISED_XCPT)
10181 TRPMResetTrap(pVCpu);
10182
10183 return rcStrict;
10184#endif
10185}
10186
10187
10188VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10189{
10190 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10191 return VERR_NOT_IMPLEMENTED;
10192}
10193
10194
10195VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10196{
10197 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10198 return VERR_NOT_IMPLEMENTED;
10199}
10200
10201
10202/**
10203 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10204 *
10205 * This API ASSUMES that the caller has already verified that the guest code is
10206 * allowed to access the I/O port. (The I/O port is in the DX register in the
10207 * guest state.)
10208 *
10209 * @returns Strict VBox status code.
10210 * @param pVCpu The cross context virtual CPU structure.
10211 * @param cbValue The size of the I/O port access (1, 2, or 4).
10212 * @param enmAddrMode The addressing mode.
10213 * @param fRepPrefix Indicates whether a repeat prefix is used
10214 * (doesn't matter which for this instruction).
10215 * @param cbInstr The instruction length in bytes.
10216 * @param iEffSeg The effective segment address.
10217 * @param fIoChecked Whether the access to the I/O port has been
10218 * checked or not. It's typically checked in the
10219 * HM scenario.
10220 */
10221VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10222 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10223{
10224 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10225 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10226
10227 /*
10228 * State init.
10229 */
10230 iemInitExec(pVCpu, 0 /*fExecOpts*/);
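    /* Note: no opcode prefetching here; the caller supplies cbInstr and the
       iemCImpl_* workers below do the rest, so only the execution state is set up. */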
10231
10232 /*
10233 * Switch orgy for getting to the right handler.
10234 */
10235 VBOXSTRICTRC rcStrict;
10236 if (fRepPrefix)
10237 {
10238 switch (enmAddrMode)
10239 {
10240 case IEMMODE_16BIT:
10241 switch (cbValue)
10242 {
10243 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10244 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10245 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10246 default:
10247 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10248 }
10249 break;
10250
10251 case IEMMODE_32BIT:
10252 switch (cbValue)
10253 {
10254 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10255 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10256 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10257 default:
10258 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10259 }
10260 break;
10261
10262 case IEMMODE_64BIT:
10263 switch (cbValue)
10264 {
10265 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10266 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10267 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10268 default:
10269 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10270 }
10271 break;
10272
10273 default:
10274 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10275 }
10276 }
10277 else
10278 {
10279 switch (enmAddrMode)
10280 {
10281 case IEMMODE_16BIT:
10282 switch (cbValue)
10283 {
10284 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10285 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10286 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10287 default:
10288 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10289 }
10290 break;
10291
10292 case IEMMODE_32BIT:
10293 switch (cbValue)
10294 {
10295 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10296 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10297 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10298 default:
10299 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10300 }
10301 break;
10302
10303 case IEMMODE_64BIT:
10304 switch (cbValue)
10305 {
10306 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10307 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10308 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10309 default:
10310 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10311 }
10312 break;
10313
10314 default:
10315 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10316 }
10317 }
10318
10319 if (pVCpu->iem.s.cActiveMappings)
10320 iemMemRollback(pVCpu);
10321
10322 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10323}
10324
10325
10326/**
10327 * Interface for HM and EM for executing string I/O IN (read) instructions.
10328 *
10329 * This API ASSUMES that the caller has already verified that the guest code is
10330 * allowed to access the I/O port. (The I/O port is in the DX register in the
10331 * guest state.)
10332 *
10333 * @returns Strict VBox status code.
10334 * @param pVCpu The cross context virtual CPU structure.
10335 * @param cbValue The size of the I/O port access (1, 2, or 4).
10336 * @param enmAddrMode The addressing mode.
10337 * @param fRepPrefix Indicates whether a repeat prefix is used
10338 * (doesn't matter which for this instruction).
10339 * @param cbInstr The instruction length in bytes.
10340 * @param fIoChecked Whether the access to the I/O port has been
10341 * checked or not. It's typically checked in the
10342 * HM scenario.
10343 */
10344VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10345 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10346{
10347 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10348
10349 /*
10350 * State init.
10351 */
10352 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10353
10354 /*
10355 * Switch orgy for getting to the right handler.
10356 */
10357 VBOXSTRICTRC rcStrict;
10358 if (fRepPrefix)
10359 {
10360 switch (enmAddrMode)
10361 {
10362 case IEMMODE_16BIT:
10363 switch (cbValue)
10364 {
10365 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10366 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10367 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10368 default:
10369 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10370 }
10371 break;
10372
10373 case IEMMODE_32BIT:
10374 switch (cbValue)
10375 {
10376 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10377 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10378 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10379 default:
10380 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10381 }
10382 break;
10383
10384 case IEMMODE_64BIT:
10385 switch (cbValue)
10386 {
10387 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10388 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10389 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10390 default:
10391 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10392 }
10393 break;
10394
10395 default:
10396 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10397 }
10398 }
10399 else
10400 {
10401 switch (enmAddrMode)
10402 {
10403 case IEMMODE_16BIT:
10404 switch (cbValue)
10405 {
10406 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10407 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10408 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10409 default:
10410 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10411 }
10412 break;
10413
10414 case IEMMODE_32BIT:
10415 switch (cbValue)
10416 {
10417 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10418 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10419 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10420 default:
10421 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10422 }
10423 break;
10424
10425 case IEMMODE_64BIT:
10426 switch (cbValue)
10427 {
10428 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10429 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10430 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10431 default:
10432 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10433 }
10434 break;
10435
10436 default:
10437 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10438 }
10439 }
10440
10441 if ( pVCpu->iem.s.cActiveMappings == 0
10442 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10443 { /* likely */ }
10444 else
10445 {
10446 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10447 iemMemRollback(pVCpu);
10448 }
10449 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10450}
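
/*
 * A minimal usage sketch of the interface above (hypothetical caller): an HM
 * exit handler that has already decoded a two-byte "rep insb" with a 32-bit
 * address size and verified the I/O permission bitmap might dispatch it like
 * this, with pVCpu and the decoded exit details assumed to come from the
 * caller:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu,
 *                                                  1,             // cbValue: byte-sized INS
 *                                                  IEMMODE_32BIT, // enmAddrMode
 *                                                  true,          // fRepPrefix
 *                                                  2,             // cbInstr: F3 6C
 *                                                  true);         // fIoChecked by the caller
 */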
10451
10452
10453/**
10454 * Interface for rawmode to execute an OUT instruction.
10455 *
10456 * @returns Strict VBox status code.
10457 * @param pVCpu The cross context virtual CPU structure.
10458 * @param cbInstr The instruction length in bytes.
10459 * @param u16Port The port to write to.
10460 * @param fImm Whether the port is specified using an immediate operand or
10461 * using the implicit DX register.
10462 * @param cbReg The register size.
10463 *
10464 * @remarks In ring-0 not all of the state needs to be synced in.
10465 */
10466VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10467{
10468 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10469 Assert(cbReg <= 4 && cbReg != 3);
10470
10471 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10472 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10473 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10474 Assert(!pVCpu->iem.s.cActiveMappings);
10475 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10476}
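
/*
 * A minimal usage sketch (hypothetical caller): forwarding a decoded one-byte
 * "out dx, al", i.e. a byte-sized write through the implicit DX port register.
 * pVCpu and u16Port are assumed to be supplied by the caller, u16Port holding
 * the guest DX value since fImm is false:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu,
 *                                                1,        // cbInstr: opcode EEh
 *                                                u16Port,  // guest DX value
 *                                                false,    // fImm: implicit DX form
 *                                                1);       // cbReg: byte access
 */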
10477
10478
10479/**
10480 * Interface for rawmode to execute an IN instruction.
10481 *
10482 * @returns Strict VBox status code.
10483 * @param pVCpu The cross context virtual CPU structure.
10484 * @param cbInstr The instruction length in bytes.
10485 * @param u16Port The port to read.
10486 * @param fImm Whether the port is specified using an immediate operand or
10487 * using the implicit DX.
10488 * @param cbReg The register size.
10489 */
10490VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10491{
10492 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10493 Assert(cbReg <= 4 && cbReg != 3);
10494
10495 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10496 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10497 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10498 Assert(!pVCpu->iem.s.cActiveMappings);
10499 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10500}
10501
10502
10503/**
10504 * Interface for HM and EM to write to a CRx register.
10505 *
10506 * @returns Strict VBox status code.
10507 * @param pVCpu The cross context virtual CPU structure.
10508 * @param cbInstr The instruction length in bytes.
10509 * @param iCrReg The control register number (destination).
10510 * @param iGReg The general purpose register number (source).
10511 *
10512 * @remarks In ring-0 not all of the state needs to be synced in.
10513 */
10514VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10515{
10516 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10517 Assert(iCrReg < 16);
10518 Assert(iGReg < 16);
10519
10520 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10521 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10522 Assert(!pVCpu->iem.s.cActiveMappings);
10523 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10524}
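
/*
 * A minimal usage sketch (hypothetical caller): emulating a trapped three-byte
 * "mov cr3, rax", i.e. writing general purpose register RAX into CR3, with
 * pVCpu assumed to be the calling EMT's cross context VCPU structure:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu,
 *                                                        3,             // cbInstr: 0F 22 D8
 *                                                        3,             // iCrReg: CR3
 *                                                        X86_GREG_xAX); // iGReg: RAX
 */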
10525
10526
10527/**
10528 * Interface for HM and EM to read from a CRx register.
10529 *
10530 * @returns Strict VBox status code.
10531 * @param pVCpu The cross context virtual CPU structure.
10532 * @param cbInstr The instruction length in bytes.
10533 * @param iGReg The general purpose register number (destination).
10534 * @param iCrReg The control register number (source).
10535 *
10536 * @remarks In ring-0 not all of the state needs to be synced in.
10537 */
10538VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10539{
10540 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10541 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10542 | CPUMCTX_EXTRN_APIC_TPR);
10543 Assert(iCrReg < 16);
10544 Assert(iGReg < 16);
10545
10546 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10547 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10548 Assert(!pVCpu->iem.s.cActiveMappings);
10549 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10550}
10551
10552
10553/**
10554 * Interface for HM and EM to write to a DRx register.
10555 *
10556 * @returns Strict VBox status code.
10557 * @param pVCpu The cross context virtual CPU structure.
10558 * @param cbInstr The instruction length in bytes.
10559 * @param iDrReg The debug register number (destination).
10560 * @param iGReg The general purpose register number (source).
10561 *
10562 * @remarks In ring-0 not all of the state needs to be synced in.
10563 */
10564VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10565{
10566 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10567 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10568 Assert(iDrReg < 8);
10569 Assert(iGReg < 16);
10570
10571 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10572 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10573 Assert(!pVCpu->iem.s.cActiveMappings);
10574 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10575}
10576
10577
10578/**
10579 * Interface for HM and EM to read from a DRx register.
10580 *
10581 * @returns Strict VBox status code.
10582 * @param pVCpu The cross context virtual CPU structure.
10583 * @param cbInstr The instruction length in bytes.
10584 * @param iGReg The general purpose register number (destination).
10585 * @param iDrReg The debug register number (source).
10586 *
10587 * @remarks In ring-0 not all of the state needs to be synced in.
10588 */
10589VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10590{
10591 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10592 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10593 Assert(iDrReg < 8);
10594 Assert(iGReg < 16);
10595
10596 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10597 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10598 Assert(!pVCpu->iem.s.cActiveMappings);
10599 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10600}
10601
10602
10603/**
10604 * Interface for HM and EM to clear the CR0[TS] bit.
10605 *
10606 * @returns Strict VBox status code.
10607 * @param pVCpu The cross context virtual CPU structure.
10608 * @param cbInstr The instruction length in bytes.
10609 *
10610 * @remarks In ring-0 not all of the state needs to be synced in.
10611 */
10612VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10613{
10614 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10615
10616 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10617 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10618 Assert(!pVCpu->iem.s.cActiveMappings);
10619 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10620}
10621
10622
10623/**
10624 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10625 *
10626 * @returns Strict VBox status code.
10627 * @param pVCpu The cross context virtual CPU structure.
10628 * @param cbInstr The instruction length in bytes.
10629 * @param uValue The value to load into CR0.
10630 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10631 * memory operand. Otherwise pass NIL_RTGCPTR.
10632 *
10633 * @remarks In ring-0 not all of the state needs to be synced in.
10634 */
10635VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10636{
10637 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10638
10639 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10640 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10641 Assert(!pVCpu->iem.s.cActiveMappings);
10642 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10643}
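
/*
 * A minimal usage sketch (hypothetical caller): emulating the register form of
 * LMSW, which is three bytes long and has no memory operand, so NIL_RTGCPTR is
 * passed for GCPtrEffDst; pVCpu and uNewMsw (the 16-bit source value) are
 * assumed to come from the caller:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu,
 *                                                 3,            // cbInstr: 0F 01 /6, reg form
 *                                                 uNewMsw,      // uValue for CR0
 *                                                 NIL_RTGCPTR); // no memory operand
 */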
10644
10645
10646/**
10647 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10648 *
10649 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10650 *
10651 * @returns Strict VBox status code.
10652 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10653 * @param cbInstr The instruction length in bytes.
10654 * @remarks In ring-0 not all of the state needs to be synced in.
10655 * @thread EMT(pVCpu)
10656 */
10657VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10658{
10659 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10660
10661 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10662 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10663 Assert(!pVCpu->iem.s.cActiveMappings);
10664 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10665}
10666
10667
10668/**
10669 * Interface for HM and EM to emulate the WBINVD instruction.
10670 *
10671 * @returns Strict VBox status code.
10672 * @param pVCpu The cross context virtual CPU structure.
10673 * @param cbInstr The instruction length in bytes.
10674 *
10675 * @remarks In ring-0 not all of the state needs to be synced in.
10676 */
10677VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10678{
10679 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10680
10681 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10682 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10683 Assert(!pVCpu->iem.s.cActiveMappings);
10684 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10685}
10686
10687
10688/**
10689 * Interface for HM and EM to emulate the INVD instruction.
10690 *
10691 * @returns Strict VBox status code.
10692 * @param pVCpu The cross context virtual CPU structure.
10693 * @param cbInstr The instruction length in bytes.
10694 *
10695 * @remarks In ring-0 not all of the state needs to be synced in.
10696 */
10697VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10698{
10699 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10700
10701 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10702 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10703 Assert(!pVCpu->iem.s.cActiveMappings);
10704 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10705}
10706
10707
10708/**
10709 * Interface for HM and EM to emulate the INVLPG instruction.
10710 *
10711 * @returns Strict VBox status code.
10712 * @retval VINF_PGM_SYNC_CR3
10713 *
10714 * @param pVCpu The cross context virtual CPU structure.
10715 * @param cbInstr The instruction length in bytes.
10716 * @param GCPtrPage The effective address of the page to invalidate.
10717 *
10718 * @remarks In ring-0 not all of the state needs to be synced in.
10719 */
10720VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10721{
10722 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10723
10724 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10725 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10726 Assert(!pVCpu->iem.s.cActiveMappings);
10727 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10728}
10729
10730
10731/**
10732 * Interface for HM and EM to emulate the INVPCID instruction.
10733 *
10734 * @returns Strict VBox status code.
10735 * @retval VINF_PGM_SYNC_CR3
10736 *
10737 * @param pVCpu The cross context virtual CPU structure.
10738 * @param cbInstr The instruction length in bytes.
10739 * @param iEffSeg The effective segment register.
10740 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10741 * @param uType The invalidation type.
10742 *
10743 * @remarks In ring-0 not all of the state needs to be synced in.
10744 */
10745VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10746 uint64_t uType)
10747{
10748 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10749
10750 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10751 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10752 Assert(!pVCpu->iem.s.cActiveMappings);
10753 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10754}
10755
10756
10757/**
10758 * Interface for HM and EM to emulate the CPUID instruction.
10759 *
10760 * @returns Strict VBox status code.
10761 *
10762 * @param pVCpu The cross context virtual CPU structure.
10763 * @param cbInstr The instruction length in bytes.
10764 *
10765 * @remarks Not all of the state needs to be synced in, the usual set plus RAX and RCX.
10766 */
10767VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10768{
10769 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10770 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10771
10772 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10773 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10774 Assert(!pVCpu->iem.s.cActiveMappings);
10775 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10776}
10777
10778
10779/**
10780 * Interface for HM and EM to emulate the RDPMC instruction.
10781 *
10782 * @returns Strict VBox status code.
10783 *
10784 * @param pVCpu The cross context virtual CPU structure.
10785 * @param cbInstr The instruction length in bytes.
10786 *
10787 * @remarks Not all of the state needs to be synced in.
10788 */
10789VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10790{
10791 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10792 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10793
10794 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10795 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10796 Assert(!pVCpu->iem.s.cActiveMappings);
10797 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10798}
10799
10800
10801/**
10802 * Interface for HM and EM to emulate the RDTSC instruction.
10803 *
10804 * @returns Strict VBox status code.
10805 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10806 *
10807 * @param pVCpu The cross context virtual CPU structure.
10808 * @param cbInstr The instruction length in bytes.
10809 *
10810 * @remarks Not all of the state needs to be synced in.
10811 */
10812VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10813{
10814 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10815 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10816
10817 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10818 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10819 Assert(!pVCpu->iem.s.cActiveMappings);
10820 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10821}
10822
10823
10824/**
10825 * Interface for HM and EM to emulate the RDTSCP instruction.
10826 *
10827 * @returns Strict VBox status code.
10828 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10829 *
10830 * @param pVCpu The cross context virtual CPU structure.
10831 * @param cbInstr The instruction length in bytes.
10832 *
10833 * @remarks Not all of the state needs to be synced in. It is recommended
10834 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10835 */
10836VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10837{
10838 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10839 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10840
10841 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10842 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10843 Assert(!pVCpu->iem.s.cActiveMappings);
10844 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10845}
10846
10847
10848/**
10849 * Interface for HM and EM to emulate the RDMSR instruction.
10850 *
10851 * @returns Strict VBox status code.
10852 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10853 *
10854 * @param pVCpu The cross context virtual CPU structure.
10855 * @param cbInstr The instruction length in bytes.
10856 *
10857 * @remarks Not all of the state needs to be synced in. Requires RCX and
10858 * (currently) all MSRs.
10859 */
10860VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10861{
10862 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10863 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10864
10865 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10866 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10867 Assert(!pVCpu->iem.s.cActiveMappings);
10868 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10869}
10870
10871
10872/**
10873 * Interface for HM and EM to emulate the WRMSR instruction.
10874 *
10875 * @returns Strict VBox status code.
10876 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10877 *
10878 * @param pVCpu The cross context virtual CPU structure.
10879 * @param cbInstr The instruction length in bytes.
10880 *
10881 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10882 * and (currently) all MSRs.
10883 */
10884VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10885{
10886 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10887 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10888 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10889
10890 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10891 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10892 Assert(!pVCpu->iem.s.cActiveMappings);
10893 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10894}
10895
10896
10897/**
10898 * Interface for HM and EM to emulate the MONITOR instruction.
10899 *
10900 * @returns Strict VBox status code.
10901 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10902 *
10903 * @param pVCpu The cross context virtual CPU structure.
10904 * @param cbInstr The instruction length in bytes.
10905 *
10906 * @remarks Not all of the state needs to be synced in.
10907 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10908 * are used.
10909 */
10910VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10911{
10912 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10913 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10914
10915 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10916 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10917 Assert(!pVCpu->iem.s.cActiveMappings);
10918 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10919}
10920
10921
10922/**
10923 * Interface for HM and EM to emulate the MWAIT instruction.
10924 *
10925 * @returns Strict VBox status code.
10926 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10927 *
10928 * @param pVCpu The cross context virtual CPU structure.
10929 * @param cbInstr The instruction length in bytes.
10930 *
10931 * @remarks Not all of the state needs to be synced in.
10932 */
10933VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10934{
10935 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10936 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10937
10938 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10939 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10940 Assert(!pVCpu->iem.s.cActiveMappings);
10941 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10942}
10943
10944
10945/**
10946 * Interface for HM and EM to emulate the HLT instruction.
10947 *
10948 * @returns Strict VBox status code.
10949 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
10950 *
10951 * @param pVCpu The cross context virtual CPU structure.
10952 * @param cbInstr The instruction length in bytes.
10953 *
10954 * @remarks Not all of the state needs to be synced in.
10955 */
10956VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10957{
10958 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10959
10960 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10961 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10962 Assert(!pVCpu->iem.s.cActiveMappings);
10963 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10964}
10965
10966
10967/**
10968 * Checks if IEM is in the process of delivering an event (interrupt or
10969 * exception).
10970 *
10971 * @returns true if we're in the process of raising an interrupt or exception,
10972 * false otherwise.
10973 * @param pVCpu The cross context virtual CPU structure.
10974 * @param puVector Where to store the vector associated with the
10975 * currently delivered event, optional.
10976 * @param pfFlags Where to store the event delivery flags (see
10977 * IEM_XCPT_FLAGS_XXX), optional.
10978 * @param puErr Where to store the error code associated with the
10979 * event, optional.
10980 * @param puCr2 Where to store the CR2 associated with the event,
10981 * optional.
10982 * @remarks The caller should check the flags to determine if the error code and
10983 * CR2 are valid for the event.
10984 */
10985VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10986{
10987 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10988 if (fRaisingXcpt)
10989 {
10990 if (puVector)
10991 *puVector = pVCpu->iem.s.uCurXcpt;
10992 if (pfFlags)
10993 *pfFlags = pVCpu->iem.s.fCurXcpt;
10994 if (puErr)
10995 *puErr = pVCpu->iem.s.uCurXcptErr;
10996 if (puCr2)
10997 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10998 }
10999 return fRaisingXcpt;
11000}
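
/*
 * A minimal usage sketch (hypothetical caller): checking whether IEM is in the
 * middle of delivering an event before deciding how to inject a new one, with
 * pVCpu assumed to be the calling EMT's cross context VCPU structure:
 *
 *      uint8_t  uVector = 0;
 *      uint32_t fFlags  = 0;
 *      uint32_t uErrCd  = 0;
 *      uint64_t uCr2    = 0;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCd, &uCr2))
 *      {
 *          // An event is being delivered; fFlags (IEM_XCPT_FLAGS_XXX) tells
 *          // whether uErrCd and uCr2 are valid for it.
 *      }
 */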
11001
11002#ifdef IN_RING3
11003
11004/**
11005 * Handles the unlikely and probably fatal merge cases.
11006 *
11007 * @returns Merged status code.
11008 * @param rcStrict Current EM status code.
11009 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11010 * with @a rcStrict.
11011 * @param iMemMap The memory mapping index. For error reporting only.
11012 * @param pVCpu The cross context virtual CPU structure of the calling
11013 * thread, for error reporting only.
11014 */
11015DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11016 unsigned iMemMap, PVMCPUCC pVCpu)
11017{
11018 if (RT_FAILURE_NP(rcStrict))
11019 return rcStrict;
11020
11021 if (RT_FAILURE_NP(rcStrictCommit))
11022 return rcStrictCommit;
11023
11024 if (rcStrict == rcStrictCommit)
11025 return rcStrictCommit;
11026
11027 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11028 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11029 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11030 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11031 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11032 return VERR_IOM_FF_STATUS_IPE;
11033}
11034
11035
11036/**
11037 * Helper for IOMR3ProcessForceFlag.
11038 *
11039 * @returns Merged status code.
11040 * @param rcStrict Current EM status code.
11041 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11042 * with @a rcStrict.
11043 * @param iMemMap The memory mapping index. For error reporting only.
11044 * @param pVCpu The cross context virtual CPU structure of the calling
11045 * thread, for error reporting only.
11046 */
11047DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11048{
11049 /* Simple. */
11050 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11051 return rcStrictCommit;
11052
11053 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11054 return rcStrict;
11055
11056 /* EM scheduling status codes. */
11057 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11058 && rcStrict <= VINF_EM_LAST))
11059 {
11060 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11061 && rcStrictCommit <= VINF_EM_LAST))
11062 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11063 }
11064
11065 /* Unlikely */
11066 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11067}
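
/*
 * Worked example of the merge rules above (illustrative values only):
 *
 *      iemR3MergeStatus(VINF_SUCCESS,      VINF_EM_RAW_TO_R3, ...) -> VINF_EM_RAW_TO_R3
 *      iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS,      ...) -> VINF_SUCCESS
 *
 * When both codes are EM scheduling statuses, the numerically smaller of the
 * two (treated as the more urgent) is kept; any other combination is handed
 * to iemR3MergeStatusSlow().
 */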
11068
11069
11070/**
11071 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11072 *
11073 * @returns Merge between @a rcStrict and what the commit operation returned.
11074 * @param pVM The cross context VM structure.
11075 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11076 * @param rcStrict The status code returned by ring-0 or raw-mode.
11077 */
11078VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11079{
11080 /*
11081 * Reset the pending commit.
11082 */
11083 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11084 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11085 ("%#x %#x %#x\n",
11086 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11087 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11088
11089 /*
11090 * Commit the pending bounce buffers (usually just one).
11091 */
11092 unsigned cBufs = 0;
11093 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11094 while (iMemMap-- > 0)
11095 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11096 {
11097 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11098 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11099 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11100
11101 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11102 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11103 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11104
11105 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11106 {
11107 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11108 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11109 pbBuf,
11110 cbFirst,
11111 PGMACCESSORIGIN_IEM);
11112 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11113 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11114 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11115 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11116 }
11117
11118 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11119 {
11120 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11121 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11122 pbBuf + cbFirst,
11123 cbSecond,
11124 PGMACCESSORIGIN_IEM);
11125 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11126 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11127 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11128 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11129 }
11130 cBufs++;
11131 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11132 }
11133
11134 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11135 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11136 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11137 pVCpu->iem.s.cActiveMappings = 0;
11138 return rcStrict;
11139}
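
/*
 * A minimal usage sketch (hypothetical ring-3 caller): after coming back to
 * ring-3 with VMCPU_FF_IEM set, i.e. with a pending bounce buffer commit, the
 * force-flag processing code merges the commit status like this, with pVM,
 * pVCpu and rcStrict assumed to be the caller's:
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */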
11140
11141#endif /* IN_RING3 */
11142