VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 105440

Last change on this file since 105440 was 105440, checked in by vboxsync, 10 months ago

VMM/IEM: Added some simple TLB tracing (disabled by default). bugref:10727

1/* $Id: IEMAll.cpp 105440 2024-07-23 10:50:17Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered; however, this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
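/*
 * Illustrative sketch of the "IEM" group level assignments above.  These are
 * hypothetical call sites (the real statements are spread throughout IEM); they
 * only show which Log* macro from VBox/log.h corresponds to which level:
 *
 * @code
 *      LogFlow(("IEMExecOne: rip=%RX64\n", pVCpu->cpum.GstCtx.rip));               // flow: enter/exit state info
 *      Log(("iemRaiseXcptOrInt: u8Vector=%#x fFlags=%#x\n", u8Vector, fFlags));    // level 1: exceptions, interrupts
 *      Log4(("decode: %04x:%08RX64 xor eax, eax\n", uCsSel, uRip));                // level 4: decoded mnemonics w/ EIP
 *      Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));                       // level 10: TLB activity
 * @endcode
 */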
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gcm.h>
134#include <VBox/vmm/gim.h>
135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
136# include <VBox/vmm/em.h>
137# include <VBox/vmm/hm_svm.h>
138#endif
139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
140# include <VBox/vmm/hmvmxinline.h>
141#endif
142#include <VBox/vmm/tm.h>
143#include <VBox/vmm/dbgf.h>
144#include <VBox/vmm/dbgftrace.h>
145#include "IEMInternal.h"
146#include <VBox/vmm/vmcc.h>
147#include <VBox/log.h>
148#include <VBox/err.h>
149#include <VBox/param.h>
150#include <VBox/dis.h>
151#include <iprt/asm-math.h>
152#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
153# include <iprt/asm-amd64-x86.h>
154#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
155# include <iprt/asm-arm.h>
156#endif
157#include <iprt/assert.h>
158#include <iprt/string.h>
159#include <iprt/x86.h>
160
161#include "IEMInline.h"
162
163
164/*********************************************************************************************************************************
165* Structures and Typedefs *
166*********************************************************************************************************************************/
167/**
168 * CPU exception classes.
169 */
170typedef enum IEMXCPTCLASS
171{
172 IEMXCPTCLASS_BENIGN,
173 IEMXCPTCLASS_CONTRIBUTORY,
174 IEMXCPTCLASS_PAGE_FAULT,
175 IEMXCPTCLASS_DOUBLE_FAULT
176} IEMXCPTCLASS;
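/*
 * For orientation, the usual x86 classification (per the Intel/AMD manuals)
 * that the exception merging logic further down follows:
 *  - benign:       e.g. #DB, #BP, #NM, #MF - never escalate to #DF on their own.
 *  - contributory: #DE, #TS, #NP, #SS and #GP.
 *  - page fault:   #PF - escalates to #DF when combined with another #PF or a
 *                  contributory exception.
 *  - double fault: #DF itself - a further contributory/#PF event means triple
 *                  fault (shutdown).
 */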
177
178
179/*********************************************************************************************************************************
180* Global Variables *
181*********************************************************************************************************************************/
182#if defined(IEM_LOG_MEMORY_WRITES)
183/** What IEM just wrote. */
184uint8_t g_abIemWrote[256];
185/** How much IEM just wrote. */
186size_t g_cbIemWrote;
187#endif
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
194 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
195
196
197/**
198 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
199 * path.
200 *
201 * This will also invalidate TLB entries for any pages with active data
202 * breakpoints on them.
203 *
204 * @returns IEM_F_BRK_PENDING_XXX or zero.
205 * @param pVCpu The cross context virtual CPU structure of the
206 * calling thread.
207 *
208 * @note Don't call directly, use iemCalcExecDbgFlags instead.
209 */
210uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
211{
212 uint32_t fExec = 0;
213
214 /*
215 * Helper for invalidating the data TLB entries for breakpoint addresses.
216 *
217 * This is to make sure any access to the page will always trigger a TLB
218 * load for as long as the breakpoint is enabled.
219 */
220#ifdef IEM_WITH_DATA_TLB
221# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
222 RTGCPTR uTagNoRev = (a_uValue); \
223 uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
224 /** @todo do large page accounting */ \
225 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
226 if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
227 pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
228 if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
229 pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
230 } while (0)
231#else
232# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
233#endif
234
235 /*
236 * Process guest breakpoints.
237 */
238#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
239 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
240 { \
241 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
242 { \
243 case X86_DR7_RW_EO: \
244 fExec |= IEM_F_PENDING_BRK_INSTR; \
245 break; \
246 case X86_DR7_RW_WO: \
247 case X86_DR7_RW_RW: \
248 fExec |= IEM_F_PENDING_BRK_DATA; \
249 INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
250 break; \
251 case X86_DR7_RW_IO: \
252 fExec |= IEM_F_PENDING_BRK_X86_IO; \
253 break; \
254 } \
255 } \
256 } while (0)
257
258 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
259 if (fGstDr7 & X86_DR7_ENABLED_MASK)
260 {
261/** @todo extract more details here to simplify matching later. */
262#ifdef IEM_WITH_DATA_TLB
263 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
264#endif
265 PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
266 PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
267 PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
268 PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
269 }
270
271 /*
272 * Process hypervisor breakpoints.
273 */
274 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
275 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
276 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
277 {
278/** @todo extract more details here to simplify matching later. */
279 PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
280 PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
281 PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
282 PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
283 }
284
285 return fExec;
286}
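/*
 * Worked example (hypothetical guest state, a sketch only): with DR0=0x1000 and
 * a DR7 value that has L0 set and R/W0 = 01b (write breakpoint), the
 * PROCESS_ONE_BP expansion above hits the X86_DR7_RW_WO case for breakpoint 0,
 * so the return value includes IEM_F_PENDING_BRK_DATA and, when the data TLB is
 * compiled in, the TLB entry pair for the 0x1000 page is zapped so the next
 * access to it goes through a fresh TLB load.  Callers normally go through
 * iemCalcExecDbgFlags, but consuming the result looks roughly like this:
 *
 * @code
 *      uint32_t const fBrk = iemCalcExecDbgFlagsSlow(pVCpu);
 *      if (fBrk & IEM_F_PENDING_BRK_DATA)
 *      {
 *          // take the slower, breakpoint-aware data access paths...
 *      }
 * @endcode
 */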
287
288
289/**
290 * Initializes the decoder state.
291 *
292 * iemReInitDecoder is mostly a copy of this function.
293 *
294 * @param pVCpu The cross context virtual CPU structure of the
295 * calling thread.
296 * @param fExecOpts Optional execution flags:
297 * - IEM_F_BYPASS_HANDLERS
298 * - IEM_F_X86_DISREGARD_LOCK
299 */
300DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
301{
302 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
312
313 /* Execution state: */
314 uint32_t fExec;
315 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
316
317 /* Decoder state: */
318 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
319 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
320 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
321 {
322 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
323 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
324 }
325 else
326 {
327 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
328 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
329 }
330 pVCpu->iem.s.fPrefixes = 0;
331 pVCpu->iem.s.uRexReg = 0;
332 pVCpu->iem.s.uRexB = 0;
333 pVCpu->iem.s.uRexIndex = 0;
334 pVCpu->iem.s.idxPrefix = 0;
335 pVCpu->iem.s.uVex3rdReg = 0;
336 pVCpu->iem.s.uVexLength = 0;
337 pVCpu->iem.s.fEvexStuff = 0;
338 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
339#ifdef IEM_WITH_CODE_TLB
340 pVCpu->iem.s.pbInstrBuf = NULL;
341 pVCpu->iem.s.offInstrNextByte = 0;
342 pVCpu->iem.s.offCurInstrStart = 0;
343# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
344 pVCpu->iem.s.offOpcode = 0;
345# endif
346# ifdef VBOX_STRICT
347 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
348 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
349 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
350 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
351# endif
352#else
353 pVCpu->iem.s.offOpcode = 0;
354 pVCpu->iem.s.cbOpcode = 0;
355#endif
356 pVCpu->iem.s.offModRm = 0;
357 pVCpu->iem.s.cActiveMappings = 0;
358 pVCpu->iem.s.iNextMapping = 0;
359 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
360
361#ifdef DBGFTRACE_ENABLED
362 switch (IEM_GET_CPU_MODE(pVCpu))
363 {
364 case IEMMODE_64BIT:
365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
366 break;
367 case IEMMODE_32BIT:
368 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
369 break;
370 case IEMMODE_16BIT:
371 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
372 break;
373 }
374#endif
375}
376
377
378/**
379 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
380 *
381 * This is mostly a copy of iemInitDecoder.
382 *
383 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
384 */
385DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
386{
387 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
395 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
396
397 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
398 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
399 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
400
401 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
402 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
403 pVCpu->iem.s.enmEffAddrMode = enmMode;
404 if (enmMode != IEMMODE_64BIT)
405 {
406 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
407 pVCpu->iem.s.enmEffOpSize = enmMode;
408 }
409 else
410 {
411 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
412 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
413 }
414 pVCpu->iem.s.fPrefixes = 0;
415 pVCpu->iem.s.uRexReg = 0;
416 pVCpu->iem.s.uRexB = 0;
417 pVCpu->iem.s.uRexIndex = 0;
418 pVCpu->iem.s.idxPrefix = 0;
419 pVCpu->iem.s.uVex3rdReg = 0;
420 pVCpu->iem.s.uVexLength = 0;
421 pVCpu->iem.s.fEvexStuff = 0;
422 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
423#ifdef IEM_WITH_CODE_TLB
424 if (pVCpu->iem.s.pbInstrBuf)
425 {
426 uint64_t off = (enmMode == IEMMODE_64BIT
427 ? pVCpu->cpum.GstCtx.rip
428 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
429 - pVCpu->iem.s.uInstrBufPc;
430 if (off < pVCpu->iem.s.cbInstrBufTotal)
431 {
432 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
433 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
434 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
435 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
436 else
437 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
438 }
439 else
440 {
441 pVCpu->iem.s.pbInstrBuf = NULL;
442 pVCpu->iem.s.offInstrNextByte = 0;
443 pVCpu->iem.s.offCurInstrStart = 0;
444 pVCpu->iem.s.cbInstrBuf = 0;
445 pVCpu->iem.s.cbInstrBufTotal = 0;
446 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
447 }
448 }
449 else
450 {
451 pVCpu->iem.s.offInstrNextByte = 0;
452 pVCpu->iem.s.offCurInstrStart = 0;
453 pVCpu->iem.s.cbInstrBuf = 0;
454 pVCpu->iem.s.cbInstrBufTotal = 0;
455# ifdef VBOX_STRICT
456 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
457# endif
458 }
459# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
460 pVCpu->iem.s.offOpcode = 0;
461# endif
462#else /* !IEM_WITH_CODE_TLB */
463 pVCpu->iem.s.cbOpcode = 0;
464 pVCpu->iem.s.offOpcode = 0;
465#endif /* !IEM_WITH_CODE_TLB */
466 pVCpu->iem.s.offModRm = 0;
467 Assert(pVCpu->iem.s.cActiveMappings == 0);
468 pVCpu->iem.s.iNextMapping = 0;
469 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
470 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
471
472#ifdef DBGFTRACE_ENABLED
473 switch (enmMode)
474 {
475 case IEMMODE_64BIT:
476 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
477 break;
478 case IEMMODE_32BIT:
479 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
480 break;
481 case IEMMODE_16BIT:
482 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
483 break;
484 }
485#endif
486}
487
488
489
490/**
491 * Prefetches opcodes the first time, when starting execution.
492 *
493 * @returns Strict VBox status code.
494 * @param pVCpu The cross context virtual CPU structure of the
495 * calling thread.
496 * @param fExecOpts Optional execution flags:
497 * - IEM_F_BYPASS_HANDLERS
498 * - IEM_F_X86_DISREGARD_LOCK
499 */
500static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
501{
502 iemInitDecoder(pVCpu, fExecOpts);
503
504#ifndef IEM_WITH_CODE_TLB
505 /*
506 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
507 *
508 * First translate CS:rIP to a physical address.
509 *
510 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
511 * all relevant bytes from the first page, as it ASSUMES it's only ever
512 * called for dealing with CS.LIM, page crossing and instructions that
513 * are too long.
514 */
515 uint32_t cbToTryRead;
516 RTGCPTR GCPtrPC;
517 if (IEM_IS_64BIT_CODE(pVCpu))
518 {
519 cbToTryRead = GUEST_PAGE_SIZE;
520 GCPtrPC = pVCpu->cpum.GstCtx.rip;
521 if (IEM_IS_CANONICAL(GCPtrPC))
522 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
523 else
524 return iemRaiseGeneralProtectionFault0(pVCpu);
525 }
526 else
527 {
528 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
529 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
530 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
531 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
532 else
533 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
534 if (cbToTryRead) { /* likely */ }
535 else /* overflowed */
536 {
537 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
538 cbToTryRead = UINT32_MAX;
539 }
540 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
541 Assert(GCPtrPC <= UINT32_MAX);
542 }
543
544 PGMPTWALKFAST WalkFast;
545 int rc = PGMGstQueryPageFast(pVCpu, GCPtrPC,
546 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
547 &WalkFast);
548 if (RT_SUCCESS(rc))
549 Assert(WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED);
550 else
551 {
552 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
553# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
554/** @todo This isn't quite right yet, as PGM_GST_SLAT_NAME_EPT(Walk) doesn't
555 * know about what kind of access we're making! See PGM_GST_NAME(WalkFast). */
556 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
557 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
558# endif
559 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
560 }
561#if 0
562 if ((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
563 else
564 {
565 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
566# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
567/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
568# error completely wrong
569 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
570 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
571# endif
572 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
573 }
574 if (!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
575 else
576 {
577 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
578# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
579/** @todo this is completely wrong for EPT. WalkFast.fFailed is always zero here!*/
580# error completely wrong.
581 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
582 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
583# endif
584 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
585 }
586#else
587 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
588 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
589#endif
590 RTGCPHYS const GCPhys = WalkFast.GCPhys;
591
592 /*
593 * Read the bytes at this address.
594 */
595 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
596 if (cbToTryRead > cbLeftOnPage)
597 cbToTryRead = cbLeftOnPage;
598 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
599 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
600
601 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
602 {
603 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
604 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
605 { /* likely */ }
606 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
607 {
608 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
609 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
610 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
611 }
612 else
613 {
614 Log((RT_SUCCESS(rcStrict)
615 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
616 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
617 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
618 return rcStrict;
619 }
620 }
621 else
622 {
623 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
624 if (RT_SUCCESS(rc))
625 { /* likely */ }
626 else
627 {
628 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
629 GCPtrPC, GCPhys, cbToTryRead, rc));
630 return rc;
631 }
632 }
633 pVCpu->iem.s.cbOpcode = cbToTryRead;
634#endif /* !IEM_WITH_CODE_TLB */
635 return VINF_SUCCESS;
636}
637
638
639#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
640/**
641 * Helper for doing large page accounting at TLB load time.
642 */
643template<bool const a_fGlobal>
644DECL_FORCE_INLINE(void) iemTlbLoadedLargePage(IEMTLB *pTlb, RTGCPTR uTagNoRev, bool f2MbLargePages)
645{
646 if (a_fGlobal)
647 pTlb->cTlbGlobalLargePageCurLoads++;
648 else
649 pTlb->cTlbNonGlobalLargePageCurLoads++;
650
651 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
652 uint32_t const fMask = (f2MbLargePages ? _2M - 1U : _4M - 1U) >> GUEST_PAGE_SHIFT;
653 IEMTLB::LARGEPAGERANGE * const pRange = a_fGlobal
654 ? &pTlb->GlobalLargePageRange
655 : &pTlb->NonGlobalLargePageRange;
656 uTagNoRev &= ~(RTGCPTR)fMask;
657 if (uTagNoRev < pRange->uFirstTag)
658 pRange->uFirstTag = uTagNoRev;
659
660 uTagNoRev |= fMask;
661 if (uTagNoRev > pRange->uLastTag)
662 pRange->uLastTag = uTagNoRev;
663}
664#endif
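/*
 * To make the masking in iemTlbLoadedLargePage concrete, a numeric sketch with
 * an arbitrary example tag (GUEST_PAGE_SHIFT assumed to be 12 here):
 *
 * @code
 *      // 2 MiB large page => 512 consecutive 4 KiB tags per page.
 *      uint32_t const fMask  = (_2M - 1U) >> GUEST_PAGE_SHIFT;     // 0x1ff
 *      RTGCPTR        uTag   = 0x12345;                            // example tag (guest page number)
 *      RTGCPTR const  uFirst = uTag & ~(RTGCPTR)fMask;             // 0x12200 - lowest tag of the large page
 *      RTGCPTR const  uLast  = uTag |  (RTGCPTR)fMask;             // 0x123ff - highest tag of the large page
 * @endcode
 *
 * So the Global/NonGlobalLargePageRange members always cover whole, naturally
 * aligned large pages, which is what the invalidation workers below rely on
 * when deciding whether a flushed address may overlap a large page.
 */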
665
666
667#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
668/**
669 * Worker for iemTlbInvalidateAll.
670 */
671template<bool a_fGlobal>
672DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
673{
674 if (!a_fGlobal)
675 pTlb->cTlsFlushes++;
676 else
677 pTlb->cTlsGlobalFlushes++;
678
679 pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
680 if (RT_LIKELY(pTlb->uTlbRevision != 0))
681 { /* very likely */ }
682 else
683 {
684 pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
685 pTlb->cTlbRevisionRollovers++;
686 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
687 while (i-- > 0)
688 pTlb->aEntries[i * 2].uTag = 0;
689 }
690
691 pTlb->cTlbNonGlobalLargePageCurLoads = 0;
692 pTlb->NonGlobalLargePageRange.uLastTag = 0;
693 pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
694
695 if (a_fGlobal)
696 {
697 pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
698 if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
699 { /* very likely */ }
700 else
701 {
702 pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
703 pTlb->cTlbRevisionRollovers++;
704 unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
705 while (i-- > 0)
706 pTlb->aEntries[i * 2 + 1].uTag = 0;
707 }
708
709 pTlb->cTlbGlobalLargePageCurLoads = 0;
710 pTlb->GlobalLargePageRange.uLastTag = 0;
711 pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
712 }
713}
714#endif
715
716
717/**
718 * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
719 */
720template<bool a_fGlobal>
721DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
722{
723#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
724 Log10(("IEMTlbInvalidateAll\n"));
725
726# ifdef IEM_WITH_CODE_TLB
727 pVCpu->iem.s.cbInstrBufTotal = 0;
728 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
729 if (a_fGlobal)
730 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
731 else
732 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
733# endif
734
735# ifdef IEM_WITH_DATA_TLB
736 iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
737 if (a_fGlobal)
738 IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
739 else
740 IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
741# endif
742#else
743 RT_NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the non-global IEM TLB entries.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVCpu The cross context virtual CPU structure of the calling
754 * thread.
755 */
756VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
757{
758 iemTlbInvalidateAll<false>(pVCpu);
759}
760
761
762/**
763 * Invalidates all the IEM TLB entries.
764 *
765 * This is called internally as well as by PGM when moving GC mappings.
766 *
767 * @param pVCpu The cross context virtual CPU structure of the calling
768 * thread.
769 */
770VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
771{
772 iemTlbInvalidateAll<true>(pVCpu);
773}
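/*
 * A minimal usage sketch for the two entry points above (hypothetical call
 * site; the real callers live in PGM and in IEM itself).  The choice between
 * them mirrors guest TLB semantics: a plain CR3 reload keeps global entries,
 * while a full flush (e.g. toggling CR4.PGE) drops everything:
 *
 * @code
 *      if (fAlsoFlushGlobalEntries)        // hypothetical flag
 *          IEMTlbInvalidateAllGlobal(pVCpu);
 *      else
 *          IEMTlbInvalidateAll(pVCpu);
 * @endcode
 */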
774
775
776#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
777
778template<bool const a_fDataTlb, bool const a_f2MbLargePage, bool const a_fGlobal, bool const a_fNonGlobal>
779DECLINLINE(void) iemTlbInvalidateLargePageWorkerInner(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
780{
781 /* Combine TAG values with the TLB revisions. */
782 RTGCPTR GCPtrTagGlob = a_fGlobal ? GCPtrTag | pTlb->uTlbRevisionGlobal : 0;
783 if (a_fNonGlobal)
784 GCPtrTag |= pTlb->uTlbRevision;
785
786 /* Set up the scan. */
787 bool const fPartialScan = IEMTLB_ENTRY_COUNT >= (a_f2MbLargePage ? 512 : 1024);
788 uintptr_t idxEven = fPartialScan ? IEMTLB_TAG_TO_EVEN_INDEX(GCPtrTag) : 0;
789 uintptr_t const idxEvenEnd = fPartialScan ? idxEven + (a_f2MbLargePage ? 512 : 1024) : IEMTLB_ENTRY_COUNT;
790 RTGCPTR const GCPtrTagMask = fPartialScan
791 ? ~(RTGCPTR)0
792 : ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK
793 & ~(RTGCPTR)( ( RT_BIT_64((a_f2MbLargePage ? 9 : 10) - IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO)
794 - 1U)
795 << IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO);
796
797 /*
798 * Do the scanning.
799 */
800 for (; idxEven < idxEvenEnd; idxEven += 2)
801 {
802 if (a_fNonGlobal)
803 {
804 if ((pTlb->aEntries[idxEven].uTag & GCPtrTagMask) == GCPtrTag)
805 {
806 if (pTlb->aEntries[idxEven].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
807 {
808 pTlb->aEntries[idxEven].uTag = 0;
809 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
810 pVCpu->iem.s.cbInstrBufTotal = 0;
811 }
812 }
813 GCPtrTag++;
814 }
815
816 if (a_fGlobal)
817 {
818 if ((pTlb->aEntries[idxEven + 1].uTag & GCPtrTagMask) == GCPtrTagGlob)
819 {
820 if (pTlb->aEntries[idxEven + 1].fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE)
821 {
822 pTlb->aEntries[idxEven + 1].uTag = 0;
823 if (!a_fDataTlb && GCPtrTag == GCPtrInstrBufPcTag)
824 pVCpu->iem.s.cbInstrBufTotal = 0;
825 }
826 }
827 GCPtrTagGlob++;
828 }
829 }
830
831}
832
833template<bool const a_fDataTlb, bool const a_f2MbLargePage>
834DECLINLINE(void) iemTlbInvalidateLargePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, RTGCPTR GCPtrInstrBufPcTag)
835{
836 AssertCompile(IEMTLB_CALC_TAG_NO_REV((RTGCPTR)0x8731U << GUEST_PAGE_SHIFT) == 0x8731U);
837
838 GCPtrTag &= ~(RTGCPTR)(RT_BIT_64((a_f2MbLargePage ? 21 : 22) - GUEST_PAGE_SHIFT) - 1U);
839 if ( pTlb->GlobalLargePageRange.uFirstTag >= GCPtrTag
840 && pTlb->GlobalLargePageRange.uLastTag <= GCPtrTag)
841 {
842 if ( pTlb->NonGlobalLargePageRange.uFirstTag < GCPtrTag
843 || pTlb->NonGlobalLargePageRange.uLastTag > GCPtrTag)
844 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
845 else
846 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, true, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
847 }
848 else if ( pTlb->NonGlobalLargePageRange.uFirstTag < GCPtrTag
849 || pTlb->NonGlobalLargePageRange.uLastTag > GCPtrTag)
850 { /* Large pages aren't as likely in the non-global TLB half. */ }
851 else
852 iemTlbInvalidateLargePageWorkerInner<a_fDataTlb, a_f2MbLargePage, false, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
853}
854
855template<bool const a_fDataTlb>
856DECLINLINE(void) iemTlbInvalidatePageWorker(PVMCPUCC pVCpu, IEMTLB *pTlb, RTGCPTR GCPtrTag, uintptr_t idxEven)
857{
858 /*
859 * Flush the entry pair.
860 */
861 if (pTlb->aEntries[idxEven].uTag == (GCPtrTag | pTlb->uTlbRevision))
862 {
863 pTlb->aEntries[idxEven].uTag = 0;
864 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
865 pVCpu->iem.s.cbInstrBufTotal = 0;
866 }
867 if (pTlb->aEntries[idxEven + 1].uTag == (GCPtrTag | pTlb->uTlbRevisionGlobal))
868 {
869 pTlb->aEntries[idxEven + 1].uTag = 0;
870 if (!a_fDataTlb && GCPtrTag == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
871 pVCpu->iem.s.cbInstrBufTotal = 0;
872 }
873
874 /*
875 * If there are (or has been) large pages in the TLB, we must check if the
876 * address being flushed may involve one of those, as then we'd have to
877 * scan for entries relating to the same page and flush those as well.
878 */
879# if 0 /** @todo do accurate counts or currently loaded large stuff and we can use those */
880 if (pTlb->cTlbGlobalLargePageCurLoads || pTlb->cTlbNonGlobalLargePageCurLoads)
881# else
882 if (pTlb->GlobalLargePageRange.uLastTag || pTlb->NonGlobalLargePageRange.uLastTag)
883# endif
884 {
885 RTGCPTR const GCPtrInstrBufPcTag = a_fDataTlb ? 0 : IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc);
886 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
887 iemTlbInvalidateLargePageWorker<a_fDataTlb, true>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
888 else
889 iemTlbInvalidateLargePageWorker<a_fDataTlb, false>(pVCpu, pTlb, GCPtrTag, GCPtrInstrBufPcTag);
890 }
891}
892
893#endif /* defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB) */
894
895/**
896 * Invalidates a page in the TLBs.
897 *
898 * @param pVCpu The cross context virtual CPU structure of the calling
899 * thread.
900 * @param GCPtr The address of the page to invalidate
901 * @thread EMT(pVCpu)
902 */
903VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
904{
905 IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
906#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
907 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
908 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
909 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
910 uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
911
912# ifdef IEM_WITH_CODE_TLB
913 iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
914# endif
915# ifdef IEM_WITH_DATA_TLB
916 iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
917# endif
918#else
919 NOREF(pVCpu); NOREF(GCPtr);
920#endif
921}
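/*
 * Usage sketch (hypothetical call site, e.g. INVLPG emulation): after the guest
 * invalidates a linear address, the same page is dropped from this vCPU's IEM
 * TLBs so the next access goes through a fresh page table walk:
 *
 * @code
 *      IEMTlbInvalidatePage(pVCpu, GCPtrPage);     // GCPtrPage = the invalidated linear address
 * @endcode
 */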
922
923
924#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
925/**
926 * Invalidates both TLBs the slow way following a physical revision rollover.
927 *
928 * Worker for IEMTlbInvalidateAllPhysical,
929 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
930 * iemMemMapJmp and others.
931 *
932 * @thread EMT(pVCpu)
933 */
934static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
935{
936 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
937 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
938 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
939
940 unsigned i;
941# ifdef IEM_WITH_CODE_TLB
942 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
943 while (i-- > 0)
944 {
945 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
946 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
947 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
948 }
949 pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
950 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
951# endif
952# ifdef IEM_WITH_DATA_TLB
953 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
954 while (i-- > 0)
955 {
956 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
957 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
958 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
959 }
960 pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
961 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
962# endif
963
964}
965#endif
966
967
968/**
969 * Invalidates the host physical aspects of the IEM TLBs.
970 *
971 * This is called internally as well as by PGM when moving GC mappings.
972 *
973 * @param pVCpu The cross context virtual CPU structure of the calling
974 * thread.
975 * @note Currently not used.
976 */
977VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
978{
979#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
980 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
981 Log10(("IEMTlbInvalidateAllPhysical\n"));
982
983# ifdef IEM_WITH_CODE_TLB
984 pVCpu->iem.s.cbInstrBufTotal = 0;
985# endif
986 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
987 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
988 {
989 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
990 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
991 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
992 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
993 }
994 else
995 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
996#else
997 NOREF(pVCpu);
998#endif
999}
1000
1001
1002/**
1003 * Invalidates the host physical aspects of the IEM TLBs.
1004 *
1005 * This is called internally as well as by PGM when moving GC mappings.
1006 *
1007 * @param pVM The cross context VM structure.
1008 * @param idCpuCaller The ID of the calling EMT if available to the caller,
1009 * otherwise NIL_VMCPUID.
1010 * @param enmReason The reason we're called.
1011 *
1012 * @remarks Caller holds the PGM lock.
1013 */
1014VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
1015{
1016#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1017 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
1018 if (pVCpuCaller)
1019 VMCPU_ASSERT_EMT(pVCpuCaller);
1020 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
1021
1022 VMCC_FOR_EACH_VMCPU(pVM)
1023 {
1024# ifdef IEM_WITH_CODE_TLB
1025 if (pVCpuCaller == pVCpu)
1026 pVCpu->iem.s.cbInstrBufTotal = 0;
1027# endif
1028
1029 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
1030 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
1031 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
1032 { /* likely */}
1033 else if (pVCpuCaller != pVCpu)
1034 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
1035 else
1036 {
1037 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1038 continue;
1039 }
1040 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1041 pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
1042
1043 if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
1044 pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
1045 }
1046 VMCC_FOR_EACH_VMCPU_END(pVM);
1047
1048#else
1049 RT_NOREF(pVM, idCpuCaller, enmReason);
1050#endif
1051}
1052
1053
1054/**
1055 * Flushes the prefetch buffer, light version.
1056 */
1057void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
1058{
1059#ifndef IEM_WITH_CODE_TLB
1060 pVCpu->iem.s.cbOpcode = cbInstr;
1061#else
1062 RT_NOREF(pVCpu, cbInstr);
1063#endif
1064}
1065
1066
1067/**
1068 * Flushes the prefetch buffer, heavy version.
1069 */
1070void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
1071{
1072#ifndef IEM_WITH_CODE_TLB
1073 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
1074#elif 1
1075 pVCpu->iem.s.cbInstrBufTotal = 0;
1076 RT_NOREF(cbInstr);
1077#else
1078 RT_NOREF(pVCpu, cbInstr);
1079#endif
1080}
1081
1082
1083
1084#ifdef IEM_WITH_CODE_TLB
1085
1086/**
1087 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1088 * failure and jumps.
1089 *
1090 * We end up here for a number of reasons:
1091 * - pbInstrBuf isn't yet initialized.
1092 * - Advancing beyond the buffer boundary (e.g. cross page).
1093 * - Advancing beyond the CS segment limit.
1094 * - Fetching from non-mappable page (e.g. MMIO).
1095 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
1096 *
1097 * @param pVCpu The cross context virtual CPU structure of the
1098 * calling thread.
1099 * @param pvDst Where to return the bytes.
1100 * @param cbDst Number of bytes to read. A value of zero is
1101 * allowed for initializing pbInstrBuf (the
1102 * recompiler does this). In this case it is best
1103 * to set pbInstrBuf to NULL prior to the call.
1104 */
1105void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
1106{
1107# ifdef IN_RING3
1108 for (;;)
1109 {
1110 Assert(cbDst <= 8);
1111 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1112
1113 /*
1114 * We might have a partial buffer match, deal with that first to make the
1115 * rest simpler. This is the first part of the cross page/buffer case.
1116 */
1117 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
1118 if (pbInstrBuf != NULL)
1119 {
1120 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
1121 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
1122 if (offBuf < cbInstrBuf)
1123 {
1124 Assert(offBuf + cbDst > cbInstrBuf);
1125 uint32_t const cbCopy = cbInstrBuf - offBuf;
1126 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
1127
1128 cbDst -= cbCopy;
1129 pvDst = (uint8_t *)pvDst + cbCopy;
1130 offBuf += cbCopy;
1131 }
1132 }
1133
1134 /*
1135 * Check segment limit, figuring how much we're allowed to access at this point.
1136 *
1137 * We will fault immediately if RIP is past the segment limit / in non-canonical
1138 * territory. If we do continue, there are one or more bytes to read before we
1139 * end up in trouble and we need to do that first before faulting.
1140 */
1141 RTGCPTR GCPtrFirst;
1142 uint32_t cbMaxRead;
1143 if (IEM_IS_64BIT_CODE(pVCpu))
1144 {
1145 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1146 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1147 { /* likely */ }
1148 else
1149 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1150 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1151 }
1152 else
1153 {
1154 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1155 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1156 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1157 { /* likely */ }
1158 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
1159 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1160 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1161 if (cbMaxRead != 0)
1162 { /* likely */ }
1163 else
1164 {
1165 /* Overflowed because address is 0 and limit is max. */
1166 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1167 cbMaxRead = X86_PAGE_SIZE;
1168 }
1169 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1170 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1171 if (cbMaxRead2 < cbMaxRead)
1172 cbMaxRead = cbMaxRead2;
1173 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1174 }
1175
1176 /*
1177 * Get the TLB entry for this piece of code.
1178 */
1179 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrFirst);
1180 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.CodeTlb, uTagNoRev);
1181 if ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision)
1182 || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal))
1183 {
1184 /* likely when executing lots of code, otherwise unlikely */
1185# ifdef IEM_WITH_TLB_STATISTICS
1186 pVCpu->iem.s.CodeTlb.cTlbCoreHits++;
1187# endif
1188 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1189
1190 /* Check TLB page table level access flags. */
1191 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1192 {
1193 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
1194 {
1195 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1196 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1197 }
1198 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1199 {
1200 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1201 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1202 }
1203 }
1204
1205 /* Look up the physical page info if necessary. */
1206 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1207 { /* not necessary */ }
1208 else
1209 {
1210 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1211 { /* likely */ }
1212 else
1213 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1214 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
1215 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1216 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1217 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1218 }
1219 }
1220 else
1221 {
1222 pVCpu->iem.s.CodeTlb.cTlbCoreMisses++;
1223
1224 /* This page table walking will set A bits as required by the access while performing the walk.
1225 ASSUMES these are set when the address is translated rather than on commit... */
1226 /** @todo testcase: check when A bits are actually set by the CPU for code. */
1227 PGMPTWALKFAST WalkFast;
1228 int rc = PGMGstQueryPageFast(pVCpu, GCPtrFirst,
1229 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1230 &WalkFast);
1231 if (RT_SUCCESS(rc))
1232 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1233 else
1234 {
1235#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1236 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? OF COURSE! */
1237 Assert(!(WalkFast.fFailed & PGM_WALKFAIL_EPT));
1238#endif
1239 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1240 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
1241 }
1242
1243 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1244 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
1245 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
1246 {
1247 pTlbe--;
1248 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevision;
1249 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1250 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1251 IEMTLBTRACE_LOAD(pVCpu, GCPtrFirst, false);
1252 }
1253 else
1254 {
1255 pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads++;
1256 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal;
1257 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
1258 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.CodeTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
1259 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrFirst, false);
1260 }
1261 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
1262 | (WalkFast.fEffective >> X86_PTE_PAE_BIT_NX) /*IEMTLBE_F_PT_NO_EXEC*/
1263 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
1264 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1265 pTlbe->GCPhys = GCPhysPg;
1266 pTlbe->pbMappingR3 = NULL;
1267 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1268 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) || IEM_GET_CPL(pVCpu) != 3);
1269 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
1270
1271 /* Resolve the physical address. */
1272 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
1273 { /* likely */ }
1274 else
1275 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1276 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
1277 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1278 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1279 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1280 }
1281
1282# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1283 /*
1284 * Try to do a direct read using the pbMappingR3 pointer.
1285 * Note! Do not recheck the physical TLB revision number here as we have the
1286 * wrong response to changes in the else case. If someone is updating
1287 * pVCpu->iem.s.CodeTlb.uTlbPhysRev in parallel to us, we should be fine
1288 * pretending we always won the race.
1289 */
1290 if ( (pTlbe->fFlagsAndPhysRev & (/*IEMTLBE_F_PHYS_REV |*/ IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1291 == /*pVCpu->iem.s.CodeTlb.uTlbPhysRev*/ 0U)
1292 {
1293 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1294 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1295 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1296 {
1297 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1298 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1299 }
1300 else
1301 {
1302 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1303 if (cbInstr + (uint32_t)cbDst <= 15)
1304 {
1305 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1306 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1307 }
1308 else
1309 {
1310 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1311 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1312 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1313 }
1314 }
1315 if (cbDst <= cbMaxRead)
1316 {
1317 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1318 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1319
1320 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1321 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1322 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1323 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1324 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1325 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1326 else
1327 Assert(!pvDst);
1328 return;
1329 }
1330 pVCpu->iem.s.pbInstrBuf = NULL;
1331
1332 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1333 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1334 }
1335# else
1336# error "refactor as needed"
1337 /*
1338 * If there is no special read handling, we can read a bit more and
1339 * put it in the prefetch buffer.
1340 */
1341 if ( cbDst < cbMaxRead
1342 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1343 {
1344 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1345 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1346 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1347 { /* likely */ }
1348 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1349 {
1350 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1351 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1352 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1353 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1354 }
1355 else
1356 {
1357 Log((RT_SUCCESS(rcStrict)
1358 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1359 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1360 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1361 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1362 }
1363 }
1364# endif
1365 /*
1366 * Special read handling, so only read exactly what's needed.
1367 * This is a highly unlikely scenario.
1368 */
1369 else
1370 {
1371 pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath++;
1372
1373 /* Check instruction length. */
1374 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1375 if (RT_LIKELY(cbInstr + cbDst <= 15))
1376 { /* likely */ }
1377 else
1378 {
1379 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1380 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1381 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1382 }
1383
1384 /* Do the reading. */
1385 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1386 if (cbToRead > 0)
1387 {
1388 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1389 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1390 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1391 { /* likely */ }
1392 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1393 {
1394 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1395 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1396 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1397 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1398 }
1399 else
1400 {
1401 Log((RT_SUCCESS(rcStrict)
1402 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1403 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1404 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1405 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1406 }
1407 }
1408
1409 /* Update the state and probably return. */
1410 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1411 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1412 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1413
1414 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1415 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1416 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1417 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1418 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1419 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1420 pVCpu->iem.s.pbInstrBuf = NULL;
1421 if (cbToRead == cbDst)
1422 return;
1423 Assert(cbToRead == cbMaxRead);
1424 }
1425
1426 /*
1427 * More to read, loop.
1428 */
1429 cbDst -= cbMaxRead;
1430 pvDst = (uint8_t *)pvDst + cbMaxRead;
1431 }
1432# else /* !IN_RING3 */
1433 RT_NOREF(pvDst, cbDst);
1434 if (pvDst || cbDst)
1435 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1436# endif /* !IN_RING3 */
1437}
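/*
 * As noted in the function documentation, the recompiler may call this purely
 * to (re)load the code TLB and pbInstrBuf without copying any opcode bytes.  A
 * minimal sketch of that calling pattern (assumes ring-3 EMT context):
 *
 * @code
 *      pVCpu->iem.s.pbInstrBuf = NULL;             // per the doc comment: best set to NULL first
 *      iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);     // cbDst=0, pvDst=NULL => TLB load only
 * @endcode
 */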
1438
1439#else /* !IEM_WITH_CODE_TLB */
1440
1441/**
1442 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1443 * exception if it fails.
1444 *
1445 * @returns Strict VBox status code.
1446 * @param pVCpu The cross context virtual CPU structure of the
1447 * calling thread.
1448 * @param cbMin The minimum number of bytes relative to offOpcode
1449 * that must be read.
1450 */
1451VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1452{
1453 /*
1454 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1455 *
1456 * First translate CS:rIP to a physical address.
1457 */
1458 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1459 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1460 uint8_t const cbLeft = cbOpcode - offOpcode;
1461 Assert(cbLeft < cbMin);
1462 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1463
1464 uint32_t cbToTryRead;
1465 RTGCPTR GCPtrNext;
1466 if (IEM_IS_64BIT_CODE(pVCpu))
1467 {
1468 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1469 if (!IEM_IS_CANONICAL(GCPtrNext))
1470 return iemRaiseGeneralProtectionFault0(pVCpu);
1471 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1472 }
1473 else
1474 {
1475 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1476 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1477 GCPtrNext32 += cbOpcode;
1478 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1479 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1480 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1481 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1482 if (!cbToTryRead) /* overflowed */
1483 {
1484 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1485 cbToTryRead = UINT32_MAX;
1486 /** @todo check out wrapping around the code segment. */
1487 }
1488 if (cbToTryRead < cbMin - cbLeft)
1489 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1490 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1491
1492 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1493 if (cbToTryRead > cbLeftOnPage)
1494 cbToTryRead = cbLeftOnPage;
1495 }
1496
1497 /* Restrict to opcode buffer space.
1498
1499 We're making ASSUMPTIONS here based on work done previously in
1500 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1501 be fetched in case of an instruction crossing two pages. */
1502 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1503 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1504 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1505 { /* likely */ }
1506 else
1507 {
1508 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1509 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1510 return iemRaiseGeneralProtectionFault0(pVCpu);
1511 }
1512
1513 PGMPTWALKFAST WalkFast;
1514 int rc = PGMGstQueryPageFast(pVCpu, GCPtrNext,
1515 IEM_GET_CPL(pVCpu) == 3 ? PGMQPAGE_F_EXECUTE | PGMQPAGE_F_USER_MODE : PGMQPAGE_F_EXECUTE,
1516 &WalkFast);
1517 if (RT_SUCCESS(rc))
1518 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
1519 else
1520 {
1521 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1522#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1523 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
1524 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1525#endif
1526 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1527 }
1528 Assert((WalkFast.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3);
1529 Assert(!(WalkFast.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE));
1530
1531 RTGCPHYS const GCPhys = WalkFast.GCPhys;
1532 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1533
1534 /*
1535 * Read the bytes at this address.
1536 *
1537 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1538     * and since PATM should only patch the start of an instruction, there
1539 * should be no need to check again here.
1540 */
1541 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1542 {
1543 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1544 cbToTryRead, PGMACCESSORIGIN_IEM);
1545 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1546 { /* likely */ }
1547 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1548 {
1549 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1550                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1551 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1552 }
1553 else
1554 {
1555 Log((RT_SUCCESS(rcStrict)
1556 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1557 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1558                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1559 return rcStrict;
1560 }
1561 }
1562 else
1563 {
1564 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1565 if (RT_SUCCESS(rc))
1566 { /* likely */ }
1567 else
1568 {
1569 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1570 return rc;
1571 }
1572 }
1573 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1574 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1575
1576 return VINF_SUCCESS;
1577}
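
/*
 * In short, the non-TLB fetch path above does three things: (1) bounds/canonical
 * checks CS:rIP for the next opcode byte, (2) translates it to a physical address
 * with PGMGstQueryPageFast, raising #PF (or an EPT exit) on failure, and
 * (3) reads up to the end of the guest page - capped by the abOpcode buffer -
 * via PGMPhysRead, or PGMPhysSimpleReadGCPhys when access handlers are bypassed.
 */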
1578
1579#endif /* !IEM_WITH_CODE_TLB */
1580#ifndef IEM_WITH_SETJMP
1581
1582/**
1583 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1584 *
1585 * @returns Strict VBox status code.
1586 * @param pVCpu The cross context virtual CPU structure of the
1587 * calling thread.
1588 * @param pb Where to return the opcode byte.
1589 */
1590VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1591{
1592 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1593 if (rcStrict == VINF_SUCCESS)
1594 {
1595 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1596 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1597 pVCpu->iem.s.offOpcode = offOpcode + 1;
1598 }
1599 else
1600 *pb = 0;
1601 return rcStrict;
1602}
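
/*
 * Context note: the (inline) fast-path getters are not in this file.  Purely as
 * an illustrative sketch - not the actual inline code, and pu8 is just a
 * hypothetical output parameter - a getter like iemOpcodeGetNextU8 can be
 * thought of as:
 *
 *     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
 *     if (RT_LIKELY(offOpcode < pVCpu->iem.s.cbOpcode))
 *     {
 *         pVCpu->iem.s.offOpcode = offOpcode + 1;
 *         *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
 *         return VINF_SUCCESS;
 *     }
 *     return iemOpcodeGetNextU8Slow(pVCpu, pu8);
 *
 * i.e. only opcode buffer underflow ends up in the slow helpers below.
 */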
1603
1604#else /* IEM_WITH_SETJMP */
1605
1606/**
1607 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1608 *
1609 * @returns The opcode byte.
1610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1611 */
1612uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1613{
1614# ifdef IEM_WITH_CODE_TLB
1615 uint8_t u8;
1616 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1617 return u8;
1618# else
1619 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1620 if (rcStrict == VINF_SUCCESS)
1621 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1622 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1623# endif
1624}
1625
1626#endif /* IEM_WITH_SETJMP */
1627
1628#ifndef IEM_WITH_SETJMP
1629
1630/**
1631 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1632 *
1633 * @returns Strict VBox status code.
1634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1635 * @param   pu16                Where to return the opcode word (sign-extended byte).
1636 */
1637VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1638{
1639 uint8_t u8;
1640 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1641 if (rcStrict == VINF_SUCCESS)
1642 *pu16 = (int8_t)u8;
1643 return rcStrict;
1644}
1645
1646
1647/**
1648 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1649 *
1650 * @returns Strict VBox status code.
1651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1652 * @param pu32 Where to return the opcode dword.
1653 */
1654VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1655{
1656 uint8_t u8;
1657 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1658 if (rcStrict == VINF_SUCCESS)
1659 *pu32 = (int8_t)u8;
1660 return rcStrict;
1661}
1662
1663
1664/**
1665 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1666 *
1667 * @returns Strict VBox status code.
1668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1669 * @param pu64 Where to return the opcode qword.
1670 */
1671VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1672{
1673 uint8_t u8;
1674 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1675 if (rcStrict == VINF_SUCCESS)
1676 *pu64 = (int8_t)u8;
1677 return rcStrict;
1678}
1679
1680#endif /* !IEM_WITH_SETJMP */
1681
1682
1683#ifndef IEM_WITH_SETJMP
1684
1685/**
1686 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1687 *
1688 * @returns Strict VBox status code.
1689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1690 * @param pu16 Where to return the opcode word.
1691 */
1692VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1693{
1694 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1695 if (rcStrict == VINF_SUCCESS)
1696 {
1697 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1698# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1699 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1700# else
1701 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1702# endif
1703 pVCpu->iem.s.offOpcode = offOpcode + 2;
1704 }
1705 else
1706 *pu16 = 0;
1707 return rcStrict;
1708}
1709
1710#else /* IEM_WITH_SETJMP */
1711
1712/**
1713 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1714 *
1715 * @returns The opcode word.
1716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1717 */
1718uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1719{
1720# ifdef IEM_WITH_CODE_TLB
1721 uint16_t u16;
1722 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1723 return u16;
1724# else
1725 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1726 if (rcStrict == VINF_SUCCESS)
1727 {
1728 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1729 pVCpu->iem.s.offOpcode += 2;
1730# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1731 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1732# else
1733 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1734# endif
1735 }
1736 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1737# endif
1738}
1739
1740#endif /* IEM_WITH_SETJMP */
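
/*
 * Note on byte order: x86 immediates and displacements are little endian, so the
 * RT_MAKE_U16/U32/U64_FROM_U8 compositions used by the getters above and below
 * take the lowest addressed opcode byte as the least significant one.  For
 * example, with abOpcode[offOpcode] = 0x34 and abOpcode[offOpcode + 1] = 0x12 the
 * 16-bit getters return 0x1234.  When IEM_USE_UNALIGNED_DATA_ACCESS is defined
 * the same value is instead read directly via an unaligned load, which assumes a
 * little-endian host.
 */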
1741
1742#ifndef IEM_WITH_SETJMP
1743
1744/**
1745 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1746 *
1747 * @returns Strict VBox status code.
1748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1749 * @param pu32 Where to return the opcode double word.
1750 */
1751VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1752{
1753 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1754 if (rcStrict == VINF_SUCCESS)
1755 {
1756 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1757 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1758 pVCpu->iem.s.offOpcode = offOpcode + 2;
1759 }
1760 else
1761 *pu32 = 0;
1762 return rcStrict;
1763}
1764
1765
1766/**
1767 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1768 *
1769 * @returns Strict VBox status code.
1770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1771 * @param pu64 Where to return the opcode quad word.
1772 */
1773VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1774{
1775 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1776 if (rcStrict == VINF_SUCCESS)
1777 {
1778 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1779 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1780 pVCpu->iem.s.offOpcode = offOpcode + 2;
1781 }
1782 else
1783 *pu64 = 0;
1784 return rcStrict;
1785}
1786
1787#endif /* !IEM_WITH_SETJMP */
1788
1789#ifndef IEM_WITH_SETJMP
1790
1791/**
1792 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1793 *
1794 * @returns Strict VBox status code.
1795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1796 * @param pu32 Where to return the opcode dword.
1797 */
1798VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1799{
1800 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1801 if (rcStrict == VINF_SUCCESS)
1802 {
1803 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1804# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1805 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1806# else
1807 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1808 pVCpu->iem.s.abOpcode[offOpcode + 1],
1809 pVCpu->iem.s.abOpcode[offOpcode + 2],
1810 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1811# endif
1812 pVCpu->iem.s.offOpcode = offOpcode + 4;
1813 }
1814 else
1815 *pu32 = 0;
1816 return rcStrict;
1817}
1818
1819#else /* IEM_WITH_SETJMP */
1820
1821/**
1822 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1823 *
1824 * @returns The opcode dword.
1825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1826 */
1827uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1828{
1829# ifdef IEM_WITH_CODE_TLB
1830 uint32_t u32;
1831 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1832 return u32;
1833# else
1834 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1835 if (rcStrict == VINF_SUCCESS)
1836 {
1837 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1838 pVCpu->iem.s.offOpcode = offOpcode + 4;
1839# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1840 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1841# else
1842 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1843 pVCpu->iem.s.abOpcode[offOpcode + 1],
1844 pVCpu->iem.s.abOpcode[offOpcode + 2],
1845 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1846# endif
1847 }
1848 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1849# endif
1850}
1851
1852#endif /* IEM_WITH_SETJMP */
1853
1854#ifndef IEM_WITH_SETJMP
1855
1856/**
1857 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1858 *
1859 * @returns Strict VBox status code.
1860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1861 * @param   pu64                Where to return the opcode dword, zero extended to a qword.
1862 */
1863VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1864{
1865 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1866 if (rcStrict == VINF_SUCCESS)
1867 {
1868 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1869 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1870 pVCpu->iem.s.abOpcode[offOpcode + 1],
1871 pVCpu->iem.s.abOpcode[offOpcode + 2],
1872 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1873 pVCpu->iem.s.offOpcode = offOpcode + 4;
1874 }
1875 else
1876 *pu64 = 0;
1877 return rcStrict;
1878}
1879
1880
1881/**
1882 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1883 *
1884 * @returns Strict VBox status code.
1885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1886 * @param pu64 Where to return the opcode qword.
1887 */
1888VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1889{
1890 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1891 if (rcStrict == VINF_SUCCESS)
1892 {
1893 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1894 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1895 pVCpu->iem.s.abOpcode[offOpcode + 1],
1896 pVCpu->iem.s.abOpcode[offOpcode + 2],
1897 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1898 pVCpu->iem.s.offOpcode = offOpcode + 4;
1899 }
1900 else
1901 *pu64 = 0;
1902 return rcStrict;
1903}
1904
1905#endif /* !IEM_WITH_SETJMP */
1906
1907#ifndef IEM_WITH_SETJMP
1908
1909/**
1910 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1911 *
1912 * @returns Strict VBox status code.
1913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1914 * @param pu64 Where to return the opcode qword.
1915 */
1916VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1917{
1918 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1919 if (rcStrict == VINF_SUCCESS)
1920 {
1921 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1922# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1923 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1924# else
1925 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1926 pVCpu->iem.s.abOpcode[offOpcode + 1],
1927 pVCpu->iem.s.abOpcode[offOpcode + 2],
1928 pVCpu->iem.s.abOpcode[offOpcode + 3],
1929 pVCpu->iem.s.abOpcode[offOpcode + 4],
1930 pVCpu->iem.s.abOpcode[offOpcode + 5],
1931 pVCpu->iem.s.abOpcode[offOpcode + 6],
1932 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1933# endif
1934 pVCpu->iem.s.offOpcode = offOpcode + 8;
1935 }
1936 else
1937 *pu64 = 0;
1938 return rcStrict;
1939}
1940
1941#else /* IEM_WITH_SETJMP */
1942
1943/**
1944 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1945 *
1946 * @returns The opcode qword.
1947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1948 */
1949uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1950{
1951# ifdef IEM_WITH_CODE_TLB
1952 uint64_t u64;
1953 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1954 return u64;
1955# else
1956 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1957 if (rcStrict == VINF_SUCCESS)
1958 {
1959 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1960 pVCpu->iem.s.offOpcode = offOpcode + 8;
1961# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1962 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1963# else
1964 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1965 pVCpu->iem.s.abOpcode[offOpcode + 1],
1966 pVCpu->iem.s.abOpcode[offOpcode + 2],
1967 pVCpu->iem.s.abOpcode[offOpcode + 3],
1968 pVCpu->iem.s.abOpcode[offOpcode + 4],
1969 pVCpu->iem.s.abOpcode[offOpcode + 5],
1970 pVCpu->iem.s.abOpcode[offOpcode + 6],
1971 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1972# endif
1973 }
1974 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1975# endif
1976}
1977
1978#endif /* IEM_WITH_SETJMP */
1979
1980
1981
1982/** @name Misc Worker Functions.
1983 * @{
1984 */
1985
1986/**
1987 * Gets the exception class for the specified exception vector.
1988 *
1989 * @returns The class of the specified exception.
1990 * @param uVector The exception vector.
1991 */
1992static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1993{
1994 Assert(uVector <= X86_XCPT_LAST);
1995 switch (uVector)
1996 {
1997 case X86_XCPT_DE:
1998 case X86_XCPT_TS:
1999 case X86_XCPT_NP:
2000 case X86_XCPT_SS:
2001 case X86_XCPT_GP:
2002 case X86_XCPT_SX: /* AMD only */
2003 return IEMXCPTCLASS_CONTRIBUTORY;
2004
2005 case X86_XCPT_PF:
2006 case X86_XCPT_VE: /* Intel only */
2007 return IEMXCPTCLASS_PAGE_FAULT;
2008
2009 case X86_XCPT_DF:
2010 return IEMXCPTCLASS_DOUBLE_FAULT;
2011 }
2012 return IEMXCPTCLASS_BENIGN;
2013}
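
/*
 * Quick reference for how the classes above combine (IEMEvaluateRecursiveXcpt
 * below holds the authoritative logic):
 *
 *      first event \ second event |  benign  | contributory | page fault
 *      ---------------------------+----------+--------------+------------
 *      benign                     |  deliver |   deliver    |  deliver
 *      contributory               |  deliver |     #DF      |  deliver
 *      page fault                 |  deliver |     #DF      |    #DF
 *      double fault               |  deliver | triple fault | triple fault
 *
 * Special cases such as a second #AC while delivering #AC (CPU hang) and
 * exceptions during NMI delivery are handled separately.
 */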
2014
2015
2016/**
2017 * Evaluates how to handle an exception caused during delivery of another event
2018 * (exception / interrupt).
2019 *
2020 * @returns How to handle the recursive exception.
2021 * @param pVCpu The cross context virtual CPU structure of the
2022 * calling thread.
2023 * @param fPrevFlags The flags of the previous event.
2024 * @param uPrevVector The vector of the previous event.
2025 * @param fCurFlags The flags of the current exception.
2026 * @param uCurVector The vector of the current exception.
2027 * @param pfXcptRaiseInfo Where to store additional information about the
2028 * exception condition. Optional.
2029 */
2030VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
2031 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
2032{
2033 /*
2034     * Only CPU exceptions can be raised while delivering other events; software interrupt
2035 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
2036 */
2037 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
2038 Assert(pVCpu); RT_NOREF(pVCpu);
2039 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
2040
2041 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
2042 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
2043 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2044 {
2045 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
2046 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
2047 {
2048 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
2049 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
2050 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
2051 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
2052 {
2053 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2054 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
2055 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
2056 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
2057 uCurVector, pVCpu->cpum.GstCtx.cr2));
2058 }
2059 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2060 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
2061 {
2062 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
2063 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
2064 }
2065 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
2066 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
2067 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
2068 {
2069 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
2070 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
2071 }
2072 }
2073 else
2074 {
2075 if (uPrevVector == X86_XCPT_NMI)
2076 {
2077 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
2078 if (uCurVector == X86_XCPT_PF)
2079 {
2080 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
2081 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
2082 }
2083 }
2084 else if ( uPrevVector == X86_XCPT_AC
2085 && uCurVector == X86_XCPT_AC)
2086 {
2087 enmRaise = IEMXCPTRAISE_CPU_HANG;
2088 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
2089 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
2090 }
2091 }
2092 }
2093 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
2094 {
2095 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
2096 if (uCurVector == X86_XCPT_PF)
2097 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
2098 }
2099 else
2100 {
2101 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
2102 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
2103 }
2104
2105 if (pfXcptRaiseInfo)
2106 *pfXcptRaiseInfo = fRaiseInfo;
2107 return enmRaise;
2108}
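
/*
 * Illustrative only (hypothetical caller, not taken from the actual VMX/SVM
 * code): an exit handler that hit a #GP while delivering a #PF could evaluate
 * the situation roughly like this:
 *
 *     IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      &fRaiseInfo);
 *
 * Here enmRaise would be IEMXCPTRAISE_DOUBLE_FAULT, since a #PF followed by a
 * contributory exception upgrades to a double fault per the table above.
 */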
2109
2110
2111/**
2112 * Enters the CPU shutdown state initiated by a triple fault or other
2113 * unrecoverable conditions.
2114 *
2115 * @returns Strict VBox status code.
2116 * @param pVCpu The cross context virtual CPU structure of the
2117 * calling thread.
2118 */
2119static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
2120{
2121 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2122 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
2123
2124 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
2125 {
2126 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
2127 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2128 }
2129
2130 RT_NOREF(pVCpu);
2131 return VINF_EM_TRIPLE_FAULT;
2132}
2133
2134
2135/**
2136 * Validates a new SS segment.
2137 *
2138 * @returns VBox strict status code.
2139 * @param pVCpu The cross context virtual CPU structure of the
2140 * calling thread.
2141 * @param   NewSS               The new SS selector.
2142 * @param uCpl The CPL to load the stack for.
2143 * @param pDesc Where to return the descriptor.
2144 */
2145static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
2146{
2147 /* Null selectors are not allowed (we're not called for dispatching
2148 interrupts with SS=0 in long mode). */
2149 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2150 {
2151 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2152 return iemRaiseTaskSwitchFault0(pVCpu);
2153 }
2154
2155 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2156 if ((NewSS & X86_SEL_RPL) != uCpl)
2157 {
2158 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2159 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2160 }
2161
2162 /*
2163 * Read the descriptor.
2164 */
2165 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2166 if (rcStrict != VINF_SUCCESS)
2167 return rcStrict;
2168
2169 /*
2170 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2171 */
2172 if (!pDesc->Legacy.Gen.u1DescType)
2173 {
2174 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2175 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2176 }
2177
2178 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2179 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2180 {
2181 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2182 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2183 }
2184 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2185 {
2186 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2187 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2188 }
2189
2190 /* Is it there? */
2191 /** @todo testcase: Is this checked before the canonical / limit check below? */
2192 if (!pDesc->Legacy.Gen.u1Present)
2193 {
2194 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2195 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2196 }
2197
2198 return VINF_SUCCESS;
2199}
2200
2201/** @} */
2202
2203
2204/** @name Raising Exceptions.
2205 *
2206 * @{
2207 */
2208
2209
2210/**
2211 * Loads the specified stack far pointer from the TSS.
2212 *
2213 * @returns VBox strict status code.
2214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2215 * @param uCpl The CPL to load the stack for.
2216 * @param pSelSS Where to return the new stack segment.
2217 * @param puEsp Where to return the new stack pointer.
2218 */
2219static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
2220{
2221 VBOXSTRICTRC rcStrict;
2222 Assert(uCpl < 4);
2223
2224 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2225 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
2226 {
2227 /*
2228 * 16-bit TSS (X86TSS16).
2229 */
2230 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2231 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2232 {
2233 uint32_t off = uCpl * 4 + 2;
2234 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2235 {
2236 /** @todo check actual access pattern here. */
2237 uint32_t u32Tmp = 0; /* gcc maybe... */
2238 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2239 if (rcStrict == VINF_SUCCESS)
2240 {
2241 *puEsp = RT_LOWORD(u32Tmp);
2242 *pSelSS = RT_HIWORD(u32Tmp);
2243 return VINF_SUCCESS;
2244 }
2245 }
2246 else
2247 {
2248 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2249 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2250 }
2251 break;
2252 }
2253
2254 /*
2255 * 32-bit TSS (X86TSS32).
2256 */
2257 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
2258 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2259 {
2260 uint32_t off = uCpl * 8 + 4;
2261 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2262 {
2263/** @todo check actual access pattern here. */
2264 uint64_t u64Tmp;
2265 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2266 if (rcStrict == VINF_SUCCESS)
2267 {
2268 *puEsp = u64Tmp & UINT32_MAX;
2269 *pSelSS = (RTSEL)(u64Tmp >> 32);
2270 return VINF_SUCCESS;
2271 }
2272 }
2273 else
2274 {
2275                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2276 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2277 }
2278 break;
2279 }
2280
2281 default:
2282 AssertFailed();
2283 rcStrict = VERR_IEM_IPE_4;
2284 break;
2285 }
2286
2287 *puEsp = 0; /* make gcc happy */
2288 *pSelSS = 0; /* make gcc happy */
2289 return rcStrict;
2290}
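
/*
 * Worked example of the offset arithmetic above (standard TSS layouts):
 *  - 16-bit TSS: SP0/SS0 start at offset 2 and each CPL entry is 4 bytes, so
 *    uCpl=1 gives off = 1*4 + 2 = 6, i.e. SP1 at 6 and SS1 at 8, read as one
 *    32-bit value (SS in the high word).
 *  - 32-bit TSS: ESP0/SS0 start at offset 4 and each CPL entry is 8 bytes, so
 *    uCpl=1 gives off = 1*8 + 4 = 0x0c, i.e. ESP1 at 0x0c and SS1 at 0x10,
 *    read as one 64-bit value (SS in the upper half).
 */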
2291
2292
2293/**
2294 * Loads the specified stack pointer from the 64-bit TSS.
2295 *
2296 * @returns VBox strict status code.
2297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2298 * @param uCpl The CPL to load the stack for.
2299 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2300 * @param puRsp Where to return the new stack pointer.
2301 */
2302static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2303{
2304 Assert(uCpl < 4);
2305 Assert(uIst < 8);
2306 *puRsp = 0; /* make gcc happy */
2307
2308 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2309 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2310
2311 uint32_t off;
2312 if (uIst)
2313 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2314 else
2315 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2316 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2317 {
2318 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2319 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2320 }
2321
2322 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2323}
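
/*
 * For reference: in the 64-bit TSS (X86TSS64) the RSP0..RSP2 fields directly
 * follow the initial reserved dword and IST1..IST7 follow a further reserved
 * area, which is why the offset above is computed as uCpl * 8 + offsetof(rsp0)
 * respectively (uIst - 1) * 8 + offsetof(ist1).
 */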
2324
2325
2326/**
2327 * Adjust the CPU state according to the exception being raised.
2328 *
2329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2330 * @param u8Vector The exception that has been raised.
2331 */
2332DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2333{
2334 switch (u8Vector)
2335 {
2336 case X86_XCPT_DB:
2337 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2338 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2339 break;
2340 /** @todo Read the AMD and Intel exception reference... */
2341 }
2342}
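
/*
 * Background for the #DB case above: DR7.GD arms general-detect, which raises
 * #DB on any debug register access.  The CPU clears GD when delivering that
 * #DB so the handler itself can touch DR0-DR7, and we mirror that behaviour
 * here.
 */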
2343
2344
2345/**
2346 * Implements exceptions and interrupts for real mode.
2347 *
2348 * @returns VBox strict status code.
2349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2350 * @param cbInstr The number of bytes to offset rIP by in the return
2351 * address.
2352 * @param u8Vector The interrupt / exception vector number.
2353 * @param fFlags The flags.
2354 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2355 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2356 */
2357static VBOXSTRICTRC
2358iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2359 uint8_t cbInstr,
2360 uint8_t u8Vector,
2361 uint32_t fFlags,
2362 uint16_t uErr,
2363 uint64_t uCr2) RT_NOEXCEPT
2364{
2365 NOREF(uErr); NOREF(uCr2);
2366 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2367
2368 /*
2369 * Read the IDT entry.
2370 */
2371 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2372 {
2373 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2374 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2375 }
2376 RTFAR16 Idte;
2377 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2378 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2379 {
2380 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2381 return rcStrict;
2382 }
2383
2384#ifdef LOG_ENABLED
2385    /* If software interrupt, try to decode it if logging is enabled and such. */
2386 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2387 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2388 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2389#endif
2390
2391 /*
2392 * Push the stack frame.
2393 */
2394 uint8_t bUnmapInfo;
2395 uint16_t *pu16Frame;
2396 uint64_t uNewRsp;
2397 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2398 if (rcStrict != VINF_SUCCESS)
2399 return rcStrict;
2400
2401 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2402#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2403 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2404 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2405 fEfl |= UINT16_C(0xf000);
2406#endif
2407 pu16Frame[2] = (uint16_t)fEfl;
2408 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2409 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2410 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2411 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2412 return rcStrict;
2413
2414 /*
2415 * Load the vector address into cs:ip and make exception specific state
2416 * adjustments.
2417 */
2418 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2419 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2420 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2421 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2422 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2423 pVCpu->cpum.GstCtx.rip = Idte.off;
2424 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2425 IEMMISC_SET_EFL(pVCpu, fEfl);
2426
2427 /** @todo do we actually do this in real mode? */
2428 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2429 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2430
2431 /*
2432     * Deal with debug events that follow the exception and clear inhibit flags.
2433 */
2434 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2435 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2436 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2437 else
2438 {
2439 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
2440 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2441 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2442 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2443 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2444 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2445 return iemRaiseDebugException(pVCpu);
2446 }
2447
2448    /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2449 so best leave them alone in case we're in a weird kind of real mode... */
2450
2451 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2452}
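
/*
 * Summary of the real-mode flow above, for quick reference: the IVT entry for
 * vector N is the 4-byte far pointer at idtr.base + N*4; a 6-byte frame is
 * pushed with IP at [SP], CS at [SP+2] and FLAGS at [SP+4]; then CS:IP is
 * loaded from the IVT entry and IF, TF and AC are cleared.
 */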
2453
2454
2455/**
2456 * Loads a NULL data selector into a data segment register when coming from V8086 mode.
2457 *
2458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2459 * @param pSReg Pointer to the segment register.
2460 */
2461DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2462{
2463 pSReg->Sel = 0;
2464 pSReg->ValidSel = 0;
2465 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2466 {
2467 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2468 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2469 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2470 }
2471 else
2472 {
2473 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2474 /** @todo check this on AMD-V */
2475 pSReg->u64Base = 0;
2476 pSReg->u32Limit = 0;
2477 }
2478}
2479
2480
2481/**
2482 * Loads a segment selector during a task switch in V8086 mode.
2483 *
2484 * @param pSReg Pointer to the segment register.
2485 * @param uSel The selector value to load.
2486 */
2487DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2488{
2489 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2490 pSReg->Sel = uSel;
2491 pSReg->ValidSel = uSel;
2492 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2493 pSReg->u64Base = uSel << 4;
2494 pSReg->u32Limit = 0xffff;
2495 pSReg->Attr.u = 0xf3;
2496}
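
/*
 * Note on the hardcoded 0xf3 attribute value above: it decodes as present (P=1),
 * DPL=3, non-system (S=1), type 3 = read/write data, accessed - i.e. the fixed
 * segment attributes virtual-8086 mode uses, together with base = selector * 16
 * and a 64 KB limit.
 */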
2497
2498
2499/**
2500 * Loads a segment selector during a task switch in protected mode.
2501 *
2502 * In this task switch scenario, we would throw \#TS exceptions rather than
2503 * \#GPs.
2504 *
2505 * @returns VBox strict status code.
2506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2507 * @param pSReg Pointer to the segment register.
2508 * @param uSel The new selector value.
2509 *
2510 * @remarks This does _not_ handle CS or SS.
2511 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2512 */
2513static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2514{
2515 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2516
2517 /* Null data selector. */
2518 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2519 {
2520 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2521 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2522 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2523 return VINF_SUCCESS;
2524 }
2525
2526 /* Fetch the descriptor. */
2527 IEMSELDESC Desc;
2528 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2529 if (rcStrict != VINF_SUCCESS)
2530 {
2531 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2532 VBOXSTRICTRC_VAL(rcStrict)));
2533 return rcStrict;
2534 }
2535
2536 /* Must be a data segment or readable code segment. */
2537 if ( !Desc.Legacy.Gen.u1DescType
2538 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2539 {
2540 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2541 Desc.Legacy.Gen.u4Type));
2542 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2543 }
2544
2545 /* Check privileges for data segments and non-conforming code segments. */
2546 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2547 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2548 {
2549 /* The RPL and the new CPL must be less than or equal to the DPL. */
2550 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2551 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2552 {
2553 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2554 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2555 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2556 }
2557 }
2558
2559 /* Is it there? */
2560 if (!Desc.Legacy.Gen.u1Present)
2561 {
2562 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2563 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2564 }
2565
2566 /* The base and limit. */
2567 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2568 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2569
2570 /*
2571 * Ok, everything checked out fine. Now set the accessed bit before
2572 * committing the result into the registers.
2573 */
2574 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2575 {
2576 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2577 if (rcStrict != VINF_SUCCESS)
2578 return rcStrict;
2579 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2580 }
2581
2582 /* Commit */
2583 pSReg->Sel = uSel;
2584 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2585 pSReg->u32Limit = cbLimit;
2586 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2587 pSReg->ValidSel = uSel;
2588 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2589 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2590 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2591
2592 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2593 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2594 return VINF_SUCCESS;
2595}
2596
2597
2598/**
2599 * Performs a task switch.
2600 *
2601 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2602 * caller is responsible for performing the necessary checks (like DPL, TSS
2603 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2604 * reference for JMP, CALL, IRET.
2605 *
2606 * If the task switch is due to a software interrupt or hardware exception,
2607 * the caller is responsible for validating the TSS selector and descriptor. See
2608 * Intel Instruction reference for INT n.
2609 *
2610 * @returns VBox strict status code.
2611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2612 * @param enmTaskSwitch The cause of the task switch.
2613 * @param uNextEip The EIP effective after the task switch.
2614 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2615 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2616 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2617 * @param SelTss The TSS selector of the new task.
2618 * @param pNewDescTss Pointer to the new TSS descriptor.
2619 */
2620VBOXSTRICTRC
2621iemTaskSwitch(PVMCPUCC pVCpu,
2622 IEMTASKSWITCH enmTaskSwitch,
2623 uint32_t uNextEip,
2624 uint32_t fFlags,
2625 uint16_t uErr,
2626 uint64_t uCr2,
2627 RTSEL SelTss,
2628 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2629{
2630 Assert(!IEM_IS_REAL_MODE(pVCpu));
2631 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2632 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2633
2634 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2635 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2636 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2637 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2638 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2639
2640 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2641 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2642
2643 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2644 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2645
2646 /* Update CR2 in case it's a page-fault. */
2647 /** @todo This should probably be done much earlier in IEM/PGM. See
2648 * @bugref{5653#c49}. */
2649 if (fFlags & IEM_XCPT_FLAGS_CR2)
2650 pVCpu->cpum.GstCtx.cr2 = uCr2;
2651
2652 /*
2653 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2654 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2655 */
2656 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2657 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2658 if (uNewTssLimit < uNewTssLimitMin)
2659 {
2660 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2661 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2662 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2663 }
2664
2665 /*
2666     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
2667 * The new TSS must have been read and validated (DPL, limits etc.) before a
2668 * task-switch VM-exit commences.
2669 *
2670 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2671 */
2672 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2673 {
2674 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2675 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2676 }
2677
2678 /*
2679 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2680 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2681 */
2682 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2683 {
2684 uint64_t const uExitInfo1 = SelTss;
2685 uint64_t uExitInfo2 = uErr;
2686 switch (enmTaskSwitch)
2687 {
2688 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2689 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2690 default: break;
2691 }
2692 if (fFlags & IEM_XCPT_FLAGS_ERR)
2693 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2694 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2695 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2696
2697 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2698 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2699 RT_NOREF2(uExitInfo1, uExitInfo2);
2700 }
2701
2702 /*
2703     * Check the current TSS limit. The last data written to the current TSS during the
2704     * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2705 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2706 *
2707     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2708 * end up with smaller than "legal" TSS limits.
2709 */
2710 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2711 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2712 if (uCurTssLimit < uCurTssLimitMin)
2713 {
2714 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2715 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2716 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2717 }
2718
2719 /*
2720 * Verify that the new TSS can be accessed and map it. Map only the required contents
2721 * and not the entire TSS.
2722 */
2723 uint8_t bUnmapInfoNewTss;
2724 void *pvNewTss;
2725 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2726 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2727 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2728 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2729 * not perform correct translation if this happens. See Intel spec. 7.2.1
2730 * "Task-State Segment". */
2731 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2732/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2733 * Consider wrapping the remainder into a function for simpler cleanup. */
2734 if (rcStrict != VINF_SUCCESS)
2735 {
2736 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2737 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2738 return rcStrict;
2739 }
2740
2741 /*
2742 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2743 */
2744 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2745 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2746 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2747 {
2748 uint8_t bUnmapInfoDescCurTss;
2749 PX86DESC pDescCurTss;
2750 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2751 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2752 if (rcStrict != VINF_SUCCESS)
2753 {
2754            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2755 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2756 return rcStrict;
2757 }
2758
2759 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2760 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2761 if (rcStrict != VINF_SUCCESS)
2762 {
2763            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2764 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2765 return rcStrict;
2766 }
2767
2768 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2769 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2770 {
2771 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2772 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2773 fEFlags &= ~X86_EFL_NT;
2774 }
2775 }
2776
2777 /*
2778 * Save the CPU state into the current TSS.
2779 */
2780 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2781 if (GCPtrNewTss == GCPtrCurTss)
2782 {
2783 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2784 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2785 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2786 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2787 pVCpu->cpum.GstCtx.ldtr.Sel));
2788 }
2789 if (fIsNewTss386)
2790 {
2791 /*
2792 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2793 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2794 */
2795 uint8_t bUnmapInfoCurTss32;
2796 void *pvCurTss32;
2797 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2798 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2799 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2800 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2801 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2802 if (rcStrict != VINF_SUCCESS)
2803 {
2804 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2805 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2806 return rcStrict;
2807 }
2808
2809        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
2810 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2811 pCurTss32->eip = uNextEip;
2812 pCurTss32->eflags = fEFlags;
2813 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2814 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2815 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2816 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2817 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2818 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2819 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2820 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2821 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2822 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2823 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2824 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2825 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2826 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2827
2828 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2829 if (rcStrict != VINF_SUCCESS)
2830 {
2831 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2832 VBOXSTRICTRC_VAL(rcStrict)));
2833 return rcStrict;
2834 }
2835 }
2836 else
2837 {
2838 /*
2839 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2840 */
2841 uint8_t bUnmapInfoCurTss16;
2842 void *pvCurTss16;
2843 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2844 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2845 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2846 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2847 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2848 if (rcStrict != VINF_SUCCESS)
2849 {
2850 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2851 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2852 return rcStrict;
2853 }
2854
2855        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTss, offCurTss + cbCurTss). */
2856 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2857 pCurTss16->ip = uNextEip;
2858 pCurTss16->flags = (uint16_t)fEFlags;
2859 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2860 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2861 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2862 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2863 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2864 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2865 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2866 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2867 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2868 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2869 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2870 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2871
2872 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2873 if (rcStrict != VINF_SUCCESS)
2874 {
2875 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2876 VBOXSTRICTRC_VAL(rcStrict)));
2877 return rcStrict;
2878 }
2879 }
2880
2881 /*
2882 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2883 */
2884 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2885 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2886 {
2887        /* Whether it's a 16 or 32-bit TSS doesn't matter here; we only access the first, common 16-bit field (selPrev). */
2888 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2889 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2890 }
2891
2892 /*
2893 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2894 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2895 */
2896 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2897 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2898 bool fNewDebugTrap;
2899 if (fIsNewTss386)
2900 {
2901 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2902 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2903 uNewEip = pNewTss32->eip;
2904 uNewEflags = pNewTss32->eflags;
2905 uNewEax = pNewTss32->eax;
2906 uNewEcx = pNewTss32->ecx;
2907 uNewEdx = pNewTss32->edx;
2908 uNewEbx = pNewTss32->ebx;
2909 uNewEsp = pNewTss32->esp;
2910 uNewEbp = pNewTss32->ebp;
2911 uNewEsi = pNewTss32->esi;
2912 uNewEdi = pNewTss32->edi;
2913 uNewES = pNewTss32->es;
2914 uNewCS = pNewTss32->cs;
2915 uNewSS = pNewTss32->ss;
2916 uNewDS = pNewTss32->ds;
2917 uNewFS = pNewTss32->fs;
2918 uNewGS = pNewTss32->gs;
2919 uNewLdt = pNewTss32->selLdt;
2920 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2921 }
2922 else
2923 {
2924 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2925 uNewCr3 = 0;
2926 uNewEip = pNewTss16->ip;
2927 uNewEflags = pNewTss16->flags;
2928 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2929 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2930 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2931 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2932 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2933 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2934 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2935 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2936 uNewES = pNewTss16->es;
2937 uNewCS = pNewTss16->cs;
2938 uNewSS = pNewTss16->ss;
2939 uNewDS = pNewTss16->ds;
2940 uNewFS = 0;
2941 uNewGS = 0;
2942 uNewLdt = pNewTss16->selLdt;
2943 fNewDebugTrap = false;
2944 }
2945
2946 if (GCPtrNewTss == GCPtrCurTss)
2947 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2948 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2949
2950 /*
2951 * We're done accessing the new TSS.
2952 */
2953 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2954 if (rcStrict != VINF_SUCCESS)
2955 {
2956 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2957 return rcStrict;
2958 }
2959
2960 /*
2961 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2962 */
2963 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2964 {
2965 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2966 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2967 if (rcStrict != VINF_SUCCESS)
2968 {
2969 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2970 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2971 return rcStrict;
2972 }
2973
2974 /* Check that the descriptor indicates the new TSS is available (not busy). */
2975 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2976 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2977 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2978
2979 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2980 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2981 if (rcStrict != VINF_SUCCESS)
2982 {
2983 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2984 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2985 return rcStrict;
2986 }
2987 }
2988
2989 /*
2990 * From this point on, we're technically in the new task. We will defer exceptions
2991 * until the completion of the task switch but before executing any instructions in the new task.
2992 */
2993 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2994 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2995 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2996 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2997 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2998 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2999 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3000
3001 /* Set the busy bit in TR. */
3002 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3003
3004 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3005 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3006 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3007 {
3008 uNewEflags |= X86_EFL_NT;
3009 }
3010
3011 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3012 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
3013 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3014
3015 pVCpu->cpum.GstCtx.eip = uNewEip;
3016 pVCpu->cpum.GstCtx.eax = uNewEax;
3017 pVCpu->cpum.GstCtx.ecx = uNewEcx;
3018 pVCpu->cpum.GstCtx.edx = uNewEdx;
3019 pVCpu->cpum.GstCtx.ebx = uNewEbx;
3020 pVCpu->cpum.GstCtx.esp = uNewEsp;
3021 pVCpu->cpum.GstCtx.ebp = uNewEbp;
3022 pVCpu->cpum.GstCtx.esi = uNewEsi;
3023 pVCpu->cpum.GstCtx.edi = uNewEdi;
3024
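    /* Note: only the architecturally defined EFLAGS bits from the TSS image survive
       the masking below (X86_EFL_LIVE_MASK), and bit 1, which always reads as one,
       is forced via X86_EFL_RA1_MASK before the flags are committed. */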
3025 uNewEflags &= X86_EFL_LIVE_MASK;
3026 uNewEflags |= X86_EFL_RA1_MASK;
3027 IEMMISC_SET_EFL(pVCpu, uNewEflags);
3028
3029 /*
3030 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3031 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3032     * because the hidden-part data originates from the guest LDT/GDT, which is accessed through paging.
3033 */
3034 pVCpu->cpum.GstCtx.es.Sel = uNewES;
3035 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
3036
3037 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3038 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
3039
3040 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3041 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
3042
3043 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
3044 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
3045
3046 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
3047 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
3048
3049 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
3050 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
3051 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3052
3053 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
3054 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3055 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
3056 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3057
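    /* On Intel guest CPUs the stale selectors are additionally flagged as unusable
       below (presumably mirroring the VT-x "unusable" segment attribute) until they
       are re-validated; for AMD guests the attribute bits are left as loaded above. */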
3058 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3059 {
3060 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
3061 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
3062 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
3063 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
3064 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
3065 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
3066 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3067 }
3068
3069 /*
3070 * Switch CR3 for the new task.
3071 */
3072 if ( fIsNewTss386
3073 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
3074 {
3075 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3076 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3077 AssertRCSuccessReturn(rc, rc);
3078
3079 /* Inform PGM. */
3080 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
3081 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
3082 AssertRCReturn(rc, rc);
3083 /* ignore informational status codes */
3084
3085 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3086 }
3087
3088 /*
3089 * Switch LDTR for the new task.
3090 */
3091 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3092 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
3093 else
3094 {
3095 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3096
3097 IEMSELDESC DescNewLdt;
3098 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3099 if (rcStrict != VINF_SUCCESS)
3100 {
3101 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3102 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3103 return rcStrict;
3104 }
3105 if ( !DescNewLdt.Legacy.Gen.u1Present
3106 || DescNewLdt.Legacy.Gen.u1DescType
3107 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3108 {
3109 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3110 uNewLdt, DescNewLdt.Legacy.u));
3111 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3112 }
3113
3114 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
3115 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3116 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3117 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3118 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3119 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3120 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
3122 }
3123
3124 IEMSELDESC DescSS;
3125 if (IEM_IS_V86_MODE(pVCpu))
3126 {
3127 IEM_SET_CPL(pVCpu, 3);
3128 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
3129 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
3130 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
3131 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
3132 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
3133 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
3134
3135 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
3136 DescSS.Legacy.u = 0;
3137 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
3138 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
3139 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
3140 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
3141 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
3142 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
3143 DescSS.Legacy.Gen.u2Dpl = 3;
3144 }
3145 else
3146 {
3147 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
3148
3149 /*
3150 * Load the stack segment for the new task.
3151 */
3152 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3153 {
3154 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3155 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3156 }
3157
3158 /* Fetch the descriptor. */
3159 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3160 if (rcStrict != VINF_SUCCESS)
3161 {
3162 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3163 VBOXSTRICTRC_VAL(rcStrict)));
3164 return rcStrict;
3165 }
3166
3167 /* SS must be a data segment and writable. */
3168 if ( !DescSS.Legacy.Gen.u1DescType
3169 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3170 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3171 {
3172 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3173 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3174 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3175 }
3176
3177 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3178 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3179 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3180 {
3181 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3182 uNewCpl));
3183 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3184 }
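        /* (Example: if the new CS selector has RPL=3, the new SS selector must also
           have RPL=3 and its descriptor DPL must be 3; otherwise the #TS above is
           raised with the SS selector index as the error code.) */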
3185
3186 /* Is it there? */
3187 if (!DescSS.Legacy.Gen.u1Present)
3188 {
3189 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3190 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3191 }
3192
3193 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3194 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3195
3196 /* Set the accessed bit before committing the result into SS. */
3197 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3198 {
3199 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3200 if (rcStrict != VINF_SUCCESS)
3201 return rcStrict;
3202 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3203 }
3204
3205 /* Commit SS. */
3206 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3207 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3208 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3209 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
3210 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
3211 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
3213
3214            /* CPL has changed, update IEM before loading the rest of the segments. */
3215 IEM_SET_CPL(pVCpu, uNewCpl);
3216
3217 /*
3218 * Load the data segments for the new task.
3219 */
3220 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
3221 if (rcStrict != VINF_SUCCESS)
3222 return rcStrict;
3223 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
3224 if (rcStrict != VINF_SUCCESS)
3225 return rcStrict;
3226 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
3227 if (rcStrict != VINF_SUCCESS)
3228 return rcStrict;
3229 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
3230 if (rcStrict != VINF_SUCCESS)
3231 return rcStrict;
3232
3233 /*
3234 * Load the code segment for the new task.
3235 */
3236 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
3237 {
3238 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
3239 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3240 }
3241
3242 /* Fetch the descriptor. */
3243 IEMSELDESC DescCS;
3244 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
3245 if (rcStrict != VINF_SUCCESS)
3246 {
3247 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
3248 return rcStrict;
3249 }
3250
3251 /* CS must be a code segment. */
3252 if ( !DescCS.Legacy.Gen.u1DescType
3253 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3254 {
3255 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
3256 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3257 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3258 }
3259
3260 /* For conforming CS, DPL must be less than or equal to the RPL. */
3261 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3262 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
3263 {
3264                Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
3265 DescCS.Legacy.Gen.u2Dpl));
3266 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3267 }
3268
3269 /* For non-conforming CS, DPL must match RPL. */
3270 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
3271 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
3272 {
3273                Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
3274 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
3275 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3276 }
3277
3278 /* Is it there? */
3279 if (!DescCS.Legacy.Gen.u1Present)
3280 {
3281 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3282 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3283 }
3284
3285 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3286 u64Base = X86DESC_BASE(&DescCS.Legacy);
3287
3288 /* Set the accessed bit before committing the result into CS. */
3289 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3290 {
3291 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3292 if (rcStrict != VINF_SUCCESS)
3293 return rcStrict;
3294 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3295 }
3296
3297 /* Commit CS. */
3298 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3299 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3300 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3301 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3302 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3303 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3305 }
3306
3307 /* Make sure the CPU mode is correct. */
3308 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3309 if (fExecNew != pVCpu->iem.s.fExec)
3310 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3311 pVCpu->iem.s.fExec = fExecNew;
3312
3313 /** @todo Debug trap. */
3314 if (fIsNewTss386 && fNewDebugTrap)
3315 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3316
3317 /*
3318 * Construct the error code masks based on what caused this task switch.
3319 * See Intel Instruction reference for INT.
3320 */
3321 uint16_t uExt;
3322 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3323 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3324 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3325 uExt = 1;
3326 else
3327 uExt = 0;
3328
3329 /*
3330     * Push any error code onto the new stack.
3331 */
3332 if (fFlags & IEM_XCPT_FLAGS_ERR)
3333 {
3334 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3335 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3336 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3337
3338 /* Check that there is sufficient space on the stack. */
3339 /** @todo Factor out segment limit checking for normal/expand down segments
3340 * into a separate function. */
3341 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3342 {
3343 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3344 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3345 {
3346 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3347 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3348 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3349 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3350 }
3351 }
3352 else
3353 {
3354 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3355 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3356 {
3357 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3358 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3359 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3360 }
3361 }
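        /* (Expand-down example: assuming a 16-bit SS with limit 0x0FFF, valid offsets
           are 0x1000..0xFFFF; the checks above require ESP to stay strictly above the
           limit both before and after pushing cbStackFrame bytes, otherwise #SS.) */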
3362
3363
3364 if (fIsNewTss386)
3365 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3366 else
3367 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3368 if (rcStrict != VINF_SUCCESS)
3369 {
3370 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3371 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3372 return rcStrict;
3373 }
3374 }
3375
3376 /* Check the new EIP against the new CS limit. */
3377 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3378 {
3379        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3380 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3381 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3382 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3383 }
3384
3385 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3386 pVCpu->cpum.GstCtx.ss.Sel));
3387 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3388}
3389
3390
3391/**
3392 * Implements exceptions and interrupts for protected mode.
3393 *
3394 * @returns VBox strict status code.
3395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3396 * @param cbInstr The number of bytes to offset rIP by in the return
3397 * address.
3398 * @param u8Vector The interrupt / exception vector number.
3399 * @param fFlags The flags.
3400 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3401 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3402 */
3403static VBOXSTRICTRC
3404iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3405 uint8_t cbInstr,
3406 uint8_t u8Vector,
3407 uint32_t fFlags,
3408 uint16_t uErr,
3409 uint64_t uCr2) RT_NOEXCEPT
3410{
3411 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3412
3413 /*
3414 * Read the IDT entry.
3415 */
3416 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3417 {
3418 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3419 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3420 }
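    /* (The error code used for these IDT-related faults follows the usual format:
       bit 0 = EXT, bit 1 = IDT, bit 2 = TI, bits 3..15 = selector/vector index; so
       X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT) encodes the
       vector number in the index field with the IDT bit set.) */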
3421 X86DESC Idte;
3422 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3423 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3424 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3425 {
3426 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3427 return rcStrict;
3428 }
3429 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3430 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3431 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3432 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3433
3434 /*
3435 * Check the descriptor type, DPL and such.
3436 * ASSUMES this is done in the same order as described for call-gate calls.
3437 */
3438 if (Idte.Gate.u1DescType)
3439 {
3440 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3441 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3442 }
3443 bool fTaskGate = false;
3444 uint8_t f32BitGate = true;
3445 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3446 switch (Idte.Gate.u4Type)
3447 {
3448 case X86_SEL_TYPE_SYS_UNDEFINED:
3449 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3450 case X86_SEL_TYPE_SYS_LDT:
3451 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3452 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3453 case X86_SEL_TYPE_SYS_UNDEFINED2:
3454 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3455 case X86_SEL_TYPE_SYS_UNDEFINED3:
3456 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3457 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3458 case X86_SEL_TYPE_SYS_UNDEFINED4:
3459 {
3460 /** @todo check what actually happens when the type is wrong...
3461 * esp. call gates. */
3462 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3463 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3464 }
3465
3466 case X86_SEL_TYPE_SYS_286_INT_GATE:
3467 f32BitGate = false;
3468 RT_FALL_THRU();
3469 case X86_SEL_TYPE_SYS_386_INT_GATE:
3470 fEflToClear |= X86_EFL_IF;
3471 break;
3472
3473 case X86_SEL_TYPE_SYS_TASK_GATE:
3474 fTaskGate = true;
3475#ifndef IEM_IMPLEMENTS_TASKSWITCH
3476 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3477#endif
3478 break;
3479
3480 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3481 f32BitGate = false;
3482 break;
3483 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3484 break;
3485
3486 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3487 }
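    /* Summary of the gate dispatch above: task gates are handled via a full task
       switch further down, interrupt gates additionally clear EFLAGS.IF on entry
       (fEflToClear), trap gates leave IF alone, and 286-style gates force a 16-bit
       stack frame (f32BitGate = false); everything else raises #GP. */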
3488
3489 /* Check DPL against CPL if applicable. */
3490 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3491 {
3492 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3493 {
3494 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3495 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3496 }
3497 }
3498
3499 /* Is it there? */
3500 if (!Idte.Gate.u1Present)
3501 {
3502 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3503 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3504 }
3505
3506 /* Is it a task-gate? */
3507 if (fTaskGate)
3508 {
3509 /*
3510 * Construct the error code masks based on what caused this task switch.
3511 * See Intel Instruction reference for INT.
3512 */
3513 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3514 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3515 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3516 RTSEL SelTss = Idte.Gate.u16Sel;
3517
3518 /*
3519 * Fetch the TSS descriptor in the GDT.
3520 */
3521 IEMSELDESC DescTSS;
3522 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3523 if (rcStrict != VINF_SUCCESS)
3524 {
3525 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3526 VBOXSTRICTRC_VAL(rcStrict)));
3527 return rcStrict;
3528 }
3529
3530 /* The TSS descriptor must be a system segment and be available (not busy). */
3531 if ( DescTSS.Legacy.Gen.u1DescType
3532 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3533 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3534 {
3535 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3536 u8Vector, SelTss, DescTSS.Legacy.au64));
3537 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3538 }
3539
3540 /* The TSS must be present. */
3541 if (!DescTSS.Legacy.Gen.u1Present)
3542 {
3543 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3544 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3545 }
3546
3547 /* Do the actual task switch. */
3548 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3549 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3550 fFlags, uErr, uCr2, SelTss, &DescTSS);
3551 }
3552
3553 /* A null CS is bad. */
3554 RTSEL NewCS = Idte.Gate.u16Sel;
3555 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3556 {
3557 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3558 return iemRaiseGeneralProtectionFault0(pVCpu);
3559 }
3560
3561 /* Fetch the descriptor for the new CS. */
3562 IEMSELDESC DescCS;
3563 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3564 if (rcStrict != VINF_SUCCESS)
3565 {
3566 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3567 return rcStrict;
3568 }
3569
3570 /* Must be a code segment. */
3571 if (!DescCS.Legacy.Gen.u1DescType)
3572 {
3573 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3574 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3575 }
3576 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3577 {
3578 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3579 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3580 }
3581
3582 /* Don't allow lowering the privilege level. */
3583 /** @todo Does the lowering of privileges apply to software interrupts
3584     *        only? This has a bearing on the more-privileged or
3585 * same-privilege stack behavior further down. A testcase would
3586 * be nice. */
3587 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3588 {
3589 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3590 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3591 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3592 }
3593
3594 /* Make sure the selector is present. */
3595 if (!DescCS.Legacy.Gen.u1Present)
3596 {
3597 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3598 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3599 }
3600
3601#ifdef LOG_ENABLED
3602    /* If this is a software interrupt, try to decode it if logging is enabled and such. */
3603 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3604 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3605 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3606#endif
3607
3608 /* Check the new EIP against the new CS limit. */
3609 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3610 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3611 ? Idte.Gate.u16OffsetLow
3612 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3613 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3614 if (uNewEip > cbLimitCS)
3615 {
3616 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3617 u8Vector, uNewEip, cbLimitCS, NewCS));
3618 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3619 }
3620 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3621
3622 /* Calc the flag image to push. */
3623 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3624 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3625 fEfl &= ~X86_EFL_RF;
3626 else
3627 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3628
3629 /* From V8086 mode only go to CPL 0. */
3630 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3631 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3632 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3633 {
3634 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3635 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3636 }
3637
3638 /*
3639 * If the privilege level changes, we need to get a new stack from the TSS.
3640 * This in turns means validating the new SS and ESP...
3641 */
3642 if (uNewCpl != IEM_GET_CPL(pVCpu))
3643 {
3644 RTSEL NewSS;
3645 uint32_t uNewEsp;
3646 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3647 if (rcStrict != VINF_SUCCESS)
3648 return rcStrict;
3649
3650 IEMSELDESC DescSS;
3651 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3652 if (rcStrict != VINF_SUCCESS)
3653 return rcStrict;
3654 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3655 if (!DescSS.Legacy.Gen.u1DefBig)
3656 {
3657 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3658 uNewEsp = (uint16_t)uNewEsp;
3659 }
3660
3661 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3662
3663 /* Check that there is sufficient space for the stack frame. */
3664 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3665 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3666 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3667 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
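        /* (E.g. a 32-bit gate, no error code, not from V86: 10 << 1 = 20 bytes for
           EIP, CS, EFLAGS, ESP and SS; with an error code it is 24 bytes, and a
           V86-mode interrupt also pushes ES, DS, FS and GS for up to 40 bytes.
           A 286-style gate halves all of these to word-sized slots.) */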
3668
3669 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3670 {
3671 if ( uNewEsp - 1 > cbLimitSS
3672 || uNewEsp < cbStackFrame)
3673 {
3674 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3675 u8Vector, NewSS, uNewEsp, cbStackFrame));
3676 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3677 }
3678 }
3679 else
3680 {
3681 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3682 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3683 {
3684 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3685 u8Vector, NewSS, uNewEsp, cbStackFrame));
3686 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3687 }
3688 }
3689
3690 /*
3691 * Start making changes.
3692 */
3693
3694 /* Set the new CPL so that stack accesses use it. */
3695 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3696 IEM_SET_CPL(pVCpu, uNewCpl);
3697
3698 /* Create the stack frame. */
3699 uint8_t bUnmapInfoStackFrame;
3700 RTPTRUNION uStackFrame;
3701 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3702 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3703 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3704 if (rcStrict != VINF_SUCCESS)
3705 return rcStrict;
3706 if (f32BitGate)
3707 {
3708 if (fFlags & IEM_XCPT_FLAGS_ERR)
3709 *uStackFrame.pu32++ = uErr;
3710 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3711 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3712 uStackFrame.pu32[2] = fEfl;
3713 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3714 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3715 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3716 if (fEfl & X86_EFL_VM)
3717 {
3718 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3719 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3720 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3721 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3722 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3723 }
3724 }
3725 else
3726 {
3727 if (fFlags & IEM_XCPT_FLAGS_ERR)
3728 *uStackFrame.pu16++ = uErr;
3729 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3730 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3731 uStackFrame.pu16[2] = fEfl;
3732 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3733 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3734 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3735 if (fEfl & X86_EFL_VM)
3736 {
3737 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3738 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3739 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3740 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3741 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3742 }
3743 }
3744 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3745 if (rcStrict != VINF_SUCCESS)
3746 return rcStrict;
3747
3748 /* Mark the selectors 'accessed' (hope this is the correct time). */
3749        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3750 * after pushing the stack frame? (Write protect the gdt + stack to
3751 * find out.) */
3752 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3753 {
3754 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3755 if (rcStrict != VINF_SUCCESS)
3756 return rcStrict;
3757 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3758 }
3759
3760 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3761 {
3762 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3763 if (rcStrict != VINF_SUCCESS)
3764 return rcStrict;
3765 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3766 }
3767
3768 /*
3769         * Start committing the register changes (joins with the DPL=CPL branch).
3770 */
3771 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3772 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3773 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3774 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3775 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3776 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3777 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3778 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3779 * SP is loaded).
3780 * Need to check the other combinations too:
3781 * - 16-bit TSS, 32-bit handler
3782 * - 32-bit TSS, 16-bit handler */
3783 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3784 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3785 else
3786 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3787
3788 if (fEfl & X86_EFL_VM)
3789 {
3790 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3791 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3792 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3793 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3794 }
3795 }
3796 /*
3797 * Same privilege, no stack change and smaller stack frame.
3798 */
3799 else
3800 {
3801 uint64_t uNewRsp;
3802 uint8_t bUnmapInfoStackFrame;
3803 RTPTRUNION uStackFrame;
3804 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3805 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3806 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3807 if (rcStrict != VINF_SUCCESS)
3808 return rcStrict;
3809
3810 if (f32BitGate)
3811 {
3812 if (fFlags & IEM_XCPT_FLAGS_ERR)
3813 *uStackFrame.pu32++ = uErr;
3814 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3815 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3816 uStackFrame.pu32[2] = fEfl;
3817 }
3818 else
3819 {
3820 if (fFlags & IEM_XCPT_FLAGS_ERR)
3821 *uStackFrame.pu16++ = uErr;
3822 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3823 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3824 uStackFrame.pu16[2] = fEfl;
3825 }
3826 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3827 if (rcStrict != VINF_SUCCESS)
3828 return rcStrict;
3829
3830 /* Mark the CS selector as 'accessed'. */
3831 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3832 {
3833 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3834 if (rcStrict != VINF_SUCCESS)
3835 return rcStrict;
3836 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3837 }
3838
3839 /*
3840 * Start committing the register changes (joins with the other branch).
3841 */
3842 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3843 }
3844
3845 /* ... register committing continues. */
3846 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3847 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3848 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3849 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3850 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3851 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3852
3853 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3854 fEfl &= ~fEflToClear;
3855 IEMMISC_SET_EFL(pVCpu, fEfl);
3856
3857 if (fFlags & IEM_XCPT_FLAGS_CR2)
3858 pVCpu->cpum.GstCtx.cr2 = uCr2;
3859
3860 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3861 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3862
3863 /* Make sure the execution flags are correct. */
3864 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3865 if (fExecNew != pVCpu->iem.s.fExec)
3866 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3867 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3868 pVCpu->iem.s.fExec = fExecNew;
3869 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3870
3871 /*
3872     * Deal with debug events that follow the exception, and clear the inhibit flags.
3873 */
3874 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3875 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
3876 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3877 else
3878 {
3879 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
3880 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
3881 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3882 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3883 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
3884 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
3885 return iemRaiseDebugException(pVCpu);
3886 }
3887
3888 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3889}
3890
3891
3892/**
3893 * Implements exceptions and interrupts for long mode.
3894 *
3895 * @returns VBox strict status code.
3896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3897 * @param cbInstr The number of bytes to offset rIP by in the return
3898 * address.
3899 * @param u8Vector The interrupt / exception vector number.
3900 * @param fFlags The flags.
3901 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3902 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3903 */
3904static VBOXSTRICTRC
3905iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3906 uint8_t cbInstr,
3907 uint8_t u8Vector,
3908 uint32_t fFlags,
3909 uint16_t uErr,
3910 uint64_t uCr2) RT_NOEXCEPT
3911{
3912 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3913
3914 /*
3915 * Read the IDT entry.
3916 */
3917 uint16_t offIdt = (uint16_t)u8Vector << 4;
3918 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3919 {
3920 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3921 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3922 }
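    /* (Long mode IDT entries are 16 bytes, hence the << 4 above and the two 8-byte
       fetches below; the second qword holds the upper 32 bits of the target RIP
       plus reserved bits.) */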
3923 X86DESC64 Idte;
3924#ifdef _MSC_VER /* Shut up silly compiler warning. */
3925 Idte.au64[0] = 0;
3926 Idte.au64[1] = 0;
3927#endif
3928 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3929 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3930 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3931 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3932 {
3933 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3934 return rcStrict;
3935 }
3936 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3937 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3938 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3939
3940 /*
3941 * Check the descriptor type, DPL and such.
3942 * ASSUMES this is done in the same order as described for call-gate calls.
3943 */
3944 if (Idte.Gate.u1DescType)
3945 {
3946 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3947 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3948 }
3949 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3950 switch (Idte.Gate.u4Type)
3951 {
3952 case AMD64_SEL_TYPE_SYS_INT_GATE:
3953 fEflToClear |= X86_EFL_IF;
3954 break;
3955 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3956 break;
3957
3958 default:
3959 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3960 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3961 }
3962
3963 /* Check DPL against CPL if applicable. */
3964 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3965 {
3966 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3967 {
3968 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3969 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3970 }
3971 }
3972
3973 /* Is it there? */
3974 if (!Idte.Gate.u1Present)
3975 {
3976 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3977 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3978 }
3979
3980 /* A null CS is bad. */
3981 RTSEL NewCS = Idte.Gate.u16Sel;
3982 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3983 {
3984 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3985 return iemRaiseGeneralProtectionFault0(pVCpu);
3986 }
3987
3988 /* Fetch the descriptor for the new CS. */
3989 IEMSELDESC DescCS;
3990 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3991 if (rcStrict != VINF_SUCCESS)
3992 {
3993 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3994 return rcStrict;
3995 }
3996
3997 /* Must be a 64-bit code segment. */
3998 if (!DescCS.Long.Gen.u1DescType)
3999 {
4000 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4001 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4002 }
4003 if ( !DescCS.Long.Gen.u1Long
4004 || DescCS.Long.Gen.u1DefBig
4005 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4006 {
4007 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4008 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4009 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4010 }
4011
4012 /* Don't allow lowering the privilege level. For non-conforming CS
4013 selectors, the CS.DPL sets the privilege level the trap/interrupt
4014 handler runs at. For conforming CS selectors, the CPL remains
4015 unchanged, but the CS.DPL must be <= CPL. */
4016 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4017 * when CPU in Ring-0. Result \#GP? */
4018 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
4019 {
4020 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4021 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4022 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4023 }
4024
4025
4026 /* Make sure the selector is present. */
4027 if (!DescCS.Legacy.Gen.u1Present)
4028 {
4029 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4030 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4031 }
4032
4033 /* Check that the new RIP is canonical. */
4034 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4035 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4036 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4037 if (!IEM_IS_CANONICAL(uNewRip))
4038 {
4039 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4040 return iemRaiseGeneralProtectionFault0(pVCpu);
4041 }
4042
4043 /*
4044 * If the privilege level changes or if the IST isn't zero, we need to get
4045 * a new stack from the TSS.
4046 */
4047 uint64_t uNewRsp;
4048 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4049 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
4050 if ( uNewCpl != IEM_GET_CPL(pVCpu)
4051 || Idte.Gate.u3IST != 0)
4052 {
4053 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4054 if (rcStrict != VINF_SUCCESS)
4055 return rcStrict;
4056 }
4057 else
4058 uNewRsp = pVCpu->cpum.GstCtx.rsp;
4059 uNewRsp &= ~(uint64_t)0xf;
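    /* In 64-bit mode the CPU aligns the stack to a 16-byte boundary before pushing
       the interrupt frame, which the masking above reproduces. */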
4060
4061 /*
4062 * Calc the flag image to push.
4063 */
4064 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4065 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4066 fEfl &= ~X86_EFL_RF;
4067 else
4068 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4069
4070 /*
4071 * Start making changes.
4072 */
4073 /* Set the new CPL so that stack accesses use it. */
4074 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
4075 IEM_SET_CPL(pVCpu, uNewCpl);
4076/** @todo Setting CPL this early seems wrong as it would affect any errors we
4077 * raise while accessing the stack and (?) the GDT/LDT... */
4078
4079 /* Create the stack frame. */
4080 uint8_t bUnmapInfoStackFrame;
4081 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
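    /* (The 64-bit frame always consists of SS, RSP, RFLAGS, CS and RIP - five
       qwords - plus an optional error code qword, and it is pushed even when the
       privilege level does not change.) */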
4082 RTPTRUNION uStackFrame;
4083 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
4084 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
4085 if (rcStrict != VINF_SUCCESS)
4086 return rcStrict;
4087
4088 if (fFlags & IEM_XCPT_FLAGS_ERR)
4089 *uStackFrame.pu64++ = uErr;
4090 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
4091 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4092 uStackFrame.pu64[2] = fEfl;
4093 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
4094 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
4095 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
4096 if (rcStrict != VINF_SUCCESS)
4097 return rcStrict;
4098
4099    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
4100    /** @todo testcase: exactly _when_ are the accessed bits set - before or
4101 * after pushing the stack frame? (Write protect the gdt + stack to
4102 * find out.) */
4103 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4104 {
4105 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4106 if (rcStrict != VINF_SUCCESS)
4107 return rcStrict;
4108 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4109 }
4110
4111 /*
4112     * Start committing the register changes.
4113 */
4114    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
4115 * hidden registers when interrupting 32-bit or 16-bit code! */
4116 if (uNewCpl != uOldCpl)
4117 {
4118 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
4119 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
4120 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4121 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4122 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4123 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4124 }
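    /* (In long mode a CPL change loads SS with a null selector whose RPL equals the
       new CPL; the base and limit set above are essentially don't-cares as far as
       the architecture is concerned.) */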
4125 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
4126 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4127 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4128 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4129 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4130 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4131 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4132 pVCpu->cpum.GstCtx.rip = uNewRip;
4133
4134 fEfl &= ~fEflToClear;
4135 IEMMISC_SET_EFL(pVCpu, fEfl);
4136
4137 if (fFlags & IEM_XCPT_FLAGS_CR2)
4138 pVCpu->cpum.GstCtx.cr2 = uCr2;
4139
4140 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4141 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4142
4143 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
4144
4145 /*
4146     * Deal with debug events that follow the exception, and clear the inhibit flags.
4147 */
4148 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4149 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
4150 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4151 else
4152 {
4153 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
4154 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
4155 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
4156 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4157 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
4158 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
4159 return iemRaiseDebugException(pVCpu);
4160 }
4161
4162 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4163}
4164
4165
4166/**
4167 * Implements exceptions and interrupts.
4168 *
4169 * All exceptions and interrupts go through this function!
4170 *
4171 * @returns VBox strict status code.
4172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4173 * @param cbInstr The number of bytes to offset rIP by in the return
4174 * address.
4175 * @param u8Vector The interrupt / exception vector number.
4176 * @param fFlags The flags.
4177 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4178 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4179 */
4180VBOXSTRICTRC
4181iemRaiseXcptOrInt(PVMCPUCC pVCpu,
4182 uint8_t cbInstr,
4183 uint8_t u8Vector,
4184 uint32_t fFlags,
4185 uint16_t uErr,
4186 uint64_t uCr2) RT_NOEXCEPT
4187{
4188 /*
4189 * Get all the state that we might need here.
4190 */
4191 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4192 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4193
4194#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4195 /*
4196 * Flush prefetch buffer
4197 */
4198 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4199#endif
4200
4201 /*
4202 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4203 */
4204 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
4205 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
4206 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
4207 | IEM_XCPT_FLAGS_BP_INSTR
4208 | IEM_XCPT_FLAGS_ICEBP_INSTR
4209 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4210 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
4211 {
4212 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4213 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4214 u8Vector = X86_XCPT_GP;
4215 uErr = 0;
4216 }
4217
4218 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
4219#ifdef DBGFTRACE_ENABLED
4220 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4221 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4222 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
4223#endif
4224
4225 /*
4226 * Check if DBGF wants to intercept the exception.
4227 */
4228 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
4229 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
4230 { /* likely */ }
4231 else
4232 {
4233 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
4234 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
4235 if (rcStrict != VINF_SUCCESS)
4236 return rcStrict;
4237 }
4238
4239 /*
4240 * Evaluate whether NMI blocking should be in effect.
4241 * Normally, NMI blocking is in effect whenever we inject an NMI.
4242 */
4243 bool fBlockNmi = u8Vector == X86_XCPT_NMI
4244 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
4245
4246#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4247 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4248 {
4249 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
4250 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4251 return rcStrict0;
4252
4253 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
4254 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
4255 {
4256 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
4257 fBlockNmi = false;
4258 }
4259 }
4260#endif
4261
4262#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4263 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
4264 {
4265 /*
4266 * If the event is being injected as part of VMRUN, it isn't subject to event
4267 * intercepts in the nested-guest. However, secondary exceptions that occur
4268 * during injection of any event -are- subject to exception intercepts.
4269 *
4270 * See AMD spec. 15.20 "Event Injection".
4271 */
4272 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
4273 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
4274 else
4275 {
4276 /*
4277 * Check and handle if the event being raised is intercepted.
4278 */
4279 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4280 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
4281 return rcStrict0;
4282 }
4283 }
4284#endif
4285
4286 /*
4287 * Set NMI blocking if necessary.
4288 */
4289 if (fBlockNmi)
4290 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
4291
4292 /*
4293 * Do recursion accounting.
4294 */
4295 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4296 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4297 if (pVCpu->iem.s.cXcptRecursions == 0)
4298 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4299 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
4300 else
4301 {
4302 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4303 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
4304 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4305
4306 if (pVCpu->iem.s.cXcptRecursions >= 4)
4307 {
4308#ifdef DEBUG_bird
4309 AssertFailed();
4310#endif
4311 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4312 }
4313
4314 /*
4315 * Evaluate the sequence of recurring events.
4316 */
4317 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
4318 NULL /* pXcptRaiseInfo */);
4319 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
4320 { /* likely */ }
4321 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
4322 {
4323 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
4324 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4325 u8Vector = X86_XCPT_DF;
4326 uErr = 0;
4327#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4328 /* VMX nested-guest #DF intercept needs to be checked here. */
4329 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4330 {
4331 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
4332 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
4333 return rcStrict0;
4334 }
4335#endif
4336 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
4337 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
4338 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
4339 }
4340 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
4341 {
4342 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
4343 return iemInitiateCpuShutdown(pVCpu);
4344 }
4345 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
4346 {
4347 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
4348 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
4349 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
4350 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
4351 return VERR_EM_GUEST_CPU_HANG;
4352 }
4353 else
4354 {
4355 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
4356 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
4357 return VERR_IEM_IPE_9;
4358 }
4359
4360 /*
4361         * The 'EXT' bit is set when an exception occurs during delivery of an external
4362         * event (such as an interrupt or an earlier exception)[1]. The privileged software
4363         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
4364         * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
4365 *
4366 * [1] - Intel spec. 6.13 "Error Code"
4367 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
4368 * [3] - Intel Instruction reference for INT n.
4369 */
4370 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4371 && (fFlags & IEM_XCPT_FLAGS_ERR)
4372 && u8Vector != X86_XCPT_PF
4373 && u8Vector != X86_XCPT_DF)
4374 {
4375 uErr |= X86_TRAP_ERR_EXTERNAL;
4376 }
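        /* (E.g. a #GP with selector error code 0x0028 raised while delivering an
           external event is reported as 0x0029, i.e. with the EXT bit (bit 0) set.) */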
4377 }
4378
4379 pVCpu->iem.s.cXcptRecursions++;
4380 pVCpu->iem.s.uCurXcpt = u8Vector;
4381 pVCpu->iem.s.fCurXcpt = fFlags;
4382 pVCpu->iem.s.uCurXcptErr = uErr;
4383 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4384
4385 /*
4386 * Extensive logging.
4387 */
4388#if defined(LOG_ENABLED) && defined(IN_RING3)
4389 if (LogIs3Enabled())
4390 {
4391 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4392 char szRegs[4096];
4393 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4394 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4395 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4396 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4397 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4398 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4399 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4400 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4401 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4402 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4403 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4404 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4405 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4406 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4407 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4408 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4409 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4410 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4411 " efer=%016VR{efer}\n"
4412 " pat=%016VR{pat}\n"
4413 " sf_mask=%016VR{sf_mask}\n"
4414 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4415 " lstar=%016VR{lstar}\n"
4416 " star=%016VR{star} cstar=%016VR{cstar}\n"
4417 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4418 );
4419
4420 char szInstr[256];
4421 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4422 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4423 szInstr, sizeof(szInstr), NULL);
4424 Log3(("%s%s\n", szRegs, szInstr));
4425 }
4426#endif /* LOG_ENABLED */
4427
4428 /*
4429 * Stats.
4430 */
4431 uint64_t const uTimestamp = ASMReadTSC();
4432 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4433 {
4434 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4435 EMHistoryAddExit(pVCpu,
4436 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
4437 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
4438 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
4439 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4440 }
4441 else
4442 {
4443 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
4444 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4445 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4446 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
4447 if (fFlags & IEM_XCPT_FLAGS_ERR)
4448 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
4449 if (fFlags & IEM_XCPT_FLAGS_CR2)
4450 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
4451 }
4452
4453 /*
4454     * Hack alert! Convert incoming debug events to silent ones on Intel.
4455 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
4456 */
4457 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4458 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
4459 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
4460 { /* ignore */ }
4461 else
4462 {
4463 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
4464 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
4465 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
4466 | CPUMCTX_DBG_HIT_DRX_SILENT;
4467 }
4468
4469 /*
4470     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4471 * to ensure that a stale TLB or paging cache entry will only cause one
4472 * spurious #PF.
4473 */
4474 if ( u8Vector == X86_XCPT_PF
4475 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4476 IEMTlbInvalidatePage(pVCpu, uCr2);
4477
4478 /*
4479 * Call the mode specific worker function.
4480 */
4481 VBOXSTRICTRC rcStrict;
4482 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4483 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4484 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4485 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4486 else
4487 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4488
4489 /* Flush the prefetch buffer. */
4490 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4491
4492 /*
4493 * Unwind.
4494 */
4495 pVCpu->iem.s.cXcptRecursions--;
4496 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4497 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4498 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4499 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4500 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4501 return rcStrict;
4502}
4503
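/*
 * Editor's note: illustrative sketch, not part of the original source and never
 * compiled.  With assumed example values it shows how the error code handling in
 * iemRaiseXcptOrInt ends up setting the EXT bit (bit 0 of a selector error code
 * on real hardware) when a #GP(err) is raised while an external interrupt was
 * being delivered.
 */
#if 0
    uint32_t const fPrevXcpt = IEM_XCPT_FLAGS_T_EXT_INT;                       /* assumed: we were delivering an external interrupt */
    uint32_t const fFlags    = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR; /* the new event: a #GP with an error code */
    uint8_t  const u8Vector  = X86_XCPT_GP;
    uint16_t       uErr      = 0x0018;                                         /* assumed selector error code */
    if (   (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
        && (fFlags & IEM_XCPT_FLAGS_ERR)
        && u8Vector != X86_XCPT_PF
        && u8Vector != X86_XCPT_DF)
        uErr |= X86_TRAP_ERR_EXTERNAL;   /* sets the EXT bit: 0x0018 -> 0x0019, assuming EXT is bit 0 */
#endif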
4504#ifdef IEM_WITH_SETJMP
4505/**
4506 * See iemRaiseXcptOrInt. Will not return.
4507 */
4508DECL_NO_RETURN(void)
4509iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4510 uint8_t cbInstr,
4511 uint8_t u8Vector,
4512 uint32_t fFlags,
4513 uint16_t uErr,
4514 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4515{
4516 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4517 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4518}
4519#endif
4520
4521
4522/** \#DE - 00. */
4523VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4524{
4525 if (GCMIsInterceptingXcptDE(pVCpu))
4526 {
4527 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
4528 if (rc == VINF_SUCCESS)
4529 {
4530 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
4531            return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
4532 }
4533 }
4534 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4535}
4536
4537
4538#ifdef IEM_WITH_SETJMP
4539/** \#DE - 00. */
4540DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4541{
4542 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4543}
4544#endif
4545
4546
4547/** \#DB - 01.
4548 * @note This automatically clears DR7.GD. */
4549VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4550{
4551 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4552 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4553 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4554}
4555
4556
4557/** \#BR - 05. */
4558VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4559{
4560 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4561}
4562
4563
4564/** \#UD - 06. */
4565VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4566{
4567 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4568}
4569
4570
4571#ifdef IEM_WITH_SETJMP
4572/** \#UD - 06. */
4573DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4574{
4575 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4576}
4577#endif
4578
4579
4580/** \#NM - 07. */
4581VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4582{
4583 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4584}
4585
4586
4587#ifdef IEM_WITH_SETJMP
4588/** \#NM - 07. */
4589DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4590{
4591 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4592}
4593#endif
4594
4595
4596/** \#TS(err) - 0a. */
4597VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4598{
4599 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4600}
4601
4602
4603/** \#TS(tr) - 0a. */
4604VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4605{
4606 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4607 pVCpu->cpum.GstCtx.tr.Sel, 0);
4608}
4609
4610
4611/** \#TS(0) - 0a. */
4612VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4613{
4614 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4615 0, 0);
4616}
4617
4618
4619/** \#TS(err) - 0a. */
4620VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4621{
4622 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4623 uSel & X86_SEL_MASK_OFF_RPL, 0);
4624}
4625
4626
4627/** \#NP(err) - 0b. */
4628VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4629{
4630 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4631}
4632
4633
4634/** \#NP(sel) - 0b. */
4635VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4636{
4637 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4638 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4639 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4640 uSel & ~X86_SEL_RPL, 0);
4641}
4642
4643
4644/** \#SS(seg) - 0c. */
4645VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4646{
4647 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4648 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4649 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4650 uSel & ~X86_SEL_RPL, 0);
4651}
4652
4653
4654/** \#SS(err) - 0c. */
4655VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4656{
4657 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4658 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4659 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4660}
4661
4662
4663/** \#GP(n) - 0d. */
4664VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4665{
4666 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4667 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4668}
4669
4670
4671/** \#GP(0) - 0d. */
4672VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4673{
4674 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4675 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4676}
4677
4678#ifdef IEM_WITH_SETJMP
4679/** \#GP(0) - 0d. */
4680DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4681{
4682 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4683 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4684}
4685#endif
4686
4687
4688/** \#GP(sel) - 0d. */
4689VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4690{
4691 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4692 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4693 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4694 Sel & ~X86_SEL_RPL, 0);
4695}
4696
4697
4698/** \#GP(0) - 0d. */
4699VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4700{
4701 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4702 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4703}
4704
4705
4706/** \#GP(sel) - 0d. */
4707VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4708{
4709 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4710 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4711 NOREF(iSegReg); NOREF(fAccess);
4712 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4713 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4714}
4715
4716#ifdef IEM_WITH_SETJMP
4717/** \#GP(sel) - 0d, longjmp. */
4718DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4719{
4720 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4721 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4722 NOREF(iSegReg); NOREF(fAccess);
4723 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4724 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4725}
4726#endif
4727
4728/** \#GP(sel) - 0d. */
4729VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4730{
4731 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4732 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4733 NOREF(Sel);
4734 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4735}
4736
4737#ifdef IEM_WITH_SETJMP
4738/** \#GP(sel) - 0d, longjmp. */
4739DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4740{
4741 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4742 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4743 NOREF(Sel);
4744 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4745}
4746#endif
4747
4748
4749/** \#GP(sel) - 0d. */
4750VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4751{
4752 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4753 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4754 NOREF(iSegReg); NOREF(fAccess);
4755 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4756}
4757
4758#ifdef IEM_WITH_SETJMP
4759/** \#GP(sel) - 0d, longjmp. */
4760DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4761{
4762 NOREF(iSegReg); NOREF(fAccess);
4763 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4764}
4765#endif
4766
4767
4768/** \#PF(n) - 0e. */
4769VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4770{
4771 uint16_t uErr;
4772 switch (rc)
4773 {
4774 case VERR_PAGE_NOT_PRESENT:
4775 case VERR_PAGE_TABLE_NOT_PRESENT:
4776 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4777 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4778 uErr = 0;
4779 break;
4780
4781 case VERR_RESERVED_PAGE_TABLE_BITS:
4782 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
4783 break;
4784
4785 default:
4786 AssertMsgFailed(("%Rrc\n", rc));
4787 RT_FALL_THRU();
4788 case VERR_ACCESS_DENIED:
4789 uErr = X86_TRAP_PF_P;
4790 break;
4791 }
4792
4793 if (IEM_GET_CPL(pVCpu) == 3)
4794 uErr |= X86_TRAP_PF_US;
4795
4796 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4797 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4798 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4799 uErr |= X86_TRAP_PF_ID;
4800
4801#if 0 /* This is so much non-sense, really. Why was it done like that? */
4802 /* Note! RW access callers reporting a WRITE protection fault, will clear
4803 the READ flag before calling. So, read-modify-write accesses (RW)
4804 can safely be reported as READ faults. */
4805 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4806 uErr |= X86_TRAP_PF_RW;
4807#else
4808 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4809 {
4810 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4811 /// (regardless of outcome of the comparison in the latter case).
4812 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4813 uErr |= X86_TRAP_PF_RW;
4814 }
4815#endif
4816
4817 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4818 of the memory operand rather than at the start of it. (Not sure what
4819       happens if it crosses a page boundary.) The current heuristic for
4820       this is to report the #PF for the last byte if the access is more than
4821       64 bytes. This is probably not correct, but we can work that out later;
4822       the main objective now is to get FXSAVE to work like on real hardware and
4823 make bs3-cpu-basic2 work. */
4824 if (cbAccess <= 64)
4825    { /* likely */ }
4826 else
4827 GCPtrWhere += cbAccess - 1;
4828
4829 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4830 uErr, GCPtrWhere);
4831}
4832
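/*
 * Editor's note: illustrative sketch, not part of the original source and never
 * compiled.  Assumed scenario: ring-3 code writes 4 bytes to a present but
 * read-only data page at 0x1000.  The mapping above then yields
 * uErr = X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW and CR2 = 0x1000.
 * For operands larger than 64 bytes (FXSAVE and friends) the reported CR2 is
 * instead the last byte of the operand, e.g. a 512 byte FXSAVE starting at
 * 0x1000 would report 0x11ff.
 */
#if 0
    VBOXSTRICTRC rcStrict = iemRaisePageFault(pVCpu, 0x1000 /*GCPtrWhere*/, 4 /*cbAccess*/,
                                              IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA, VERR_ACCESS_DENIED);
#endif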
4833#ifdef IEM_WITH_SETJMP
4834/** \#PF(n) - 0e, longjmp. */
4835DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4836 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4837{
4838 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4839}
4840#endif
4841
4842
4843/** \#MF(0) - 10. */
4844VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4845{
4846 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4847 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4848
4849 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4850 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4851 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4852}
4853
4854#ifdef IEM_WITH_SETJMP
4855/** \#MF(0) - 10, longjmp. */
4856DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4857{
4858 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4859}
4860#endif
4861
4862
4863/** \#AC(0) - 11. */
4864VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4865{
4866 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4867}
4868
4869#ifdef IEM_WITH_SETJMP
4870/** \#AC(0) - 11, longjmp. */
4871DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4872{
4873 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4874}
4875#endif
4876
4877
4878/** \#XF(0)/\#XM(0) - 19. */
4879VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4880{
4881 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4882}
4883
4884
4885#ifdef IEM_WITH_SETJMP
4886/** \#XF(0)/\#XM(0) - 19, longjmp. */
4887DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4888{
4889 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4890}
4891#endif
4892
4893
4894/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4895IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4896{
4897 NOREF(cbInstr);
4898 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4899}
4900
4901
4902/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4903IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4904{
4905 NOREF(cbInstr);
4906 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4907}
4908
4909
4910/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4911IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4912{
4913 NOREF(cbInstr);
4914 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4915}
4916
4917
4918/** @} */
4919
4920/** @name Common opcode decoders.
4921 * @{
4922 */
4923//#include <iprt/mem.h>
4924
4925/**
4926 * Used to add extra details about a stub case.
4927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4928 */
4929void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4930{
4931#if defined(LOG_ENABLED) && defined(IN_RING3)
4932 PVM pVM = pVCpu->CTX_SUFF(pVM);
4933 char szRegs[4096];
4934 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4935 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4936 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4937 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4938 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4939 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4940 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4941 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4942 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4943 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4944 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4945 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4946 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4947 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4948 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4949 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4950 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4951 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4952 " efer=%016VR{efer}\n"
4953 " pat=%016VR{pat}\n"
4954 " sf_mask=%016VR{sf_mask}\n"
4955 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4956 " lstar=%016VR{lstar}\n"
4957 " star=%016VR{star} cstar=%016VR{cstar}\n"
4958 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4959 );
4960
4961 char szInstr[256];
4962 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4963 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4964 szInstr, sizeof(szInstr), NULL);
4965
4966 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4967#else
4968 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4969#endif
4970}
4971
4972/** @} */
4973
4974
4975
4976/** @name Register Access.
4977 * @{
4978 */
4979
4980/**
4981 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4982 *
4983 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4984 * segment limit.
4985 *
4986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4987 * @param cbInstr Instruction size.
4988 * @param offNextInstr The offset of the next instruction.
4989 * @param enmEffOpSize Effective operand size.
4990 */
4991VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4992 IEMMODE enmEffOpSize) RT_NOEXCEPT
4993{
4994 switch (enmEffOpSize)
4995 {
4996 case IEMMODE_16BIT:
4997 {
4998 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4999 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5000 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
5001 pVCpu->cpum.GstCtx.rip = uNewIp;
5002 else
5003 return iemRaiseGeneralProtectionFault0(pVCpu);
5004 break;
5005 }
5006
5007 case IEMMODE_32BIT:
5008 {
5009 Assert(!IEM_IS_64BIT_CODE(pVCpu));
5010 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
5011
5012 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
5013 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5014 pVCpu->cpum.GstCtx.rip = uNewEip;
5015 else
5016 return iemRaiseGeneralProtectionFault0(pVCpu);
5017 break;
5018 }
5019
5020 case IEMMODE_64BIT:
5021 {
5022 Assert(IEM_IS_64BIT_CODE(pVCpu));
5023
5024 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5025 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5026 pVCpu->cpum.GstCtx.rip = uNewRip;
5027 else
5028 return iemRaiseGeneralProtectionFault0(pVCpu);
5029 break;
5030 }
5031
5032 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5033 }
5034
5035#ifndef IEM_WITH_CODE_TLB
5036 /* Flush the prefetch buffer. */
5037 pVCpu->iem.s.cbOpcode = cbInstr;
5038#endif
5039
5040 /*
5041 * Clear RF and finish the instruction (maybe raise #DB).
5042 */
5043 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5044}
5045
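/*
 * Editor's note: illustrative sketch, not part of the original source and never
 * compiled.  The uint16_t truncation in the 16-bit case above is what provides
 * the IP wrap-around at 64 KiB.  Assumed values: a 2 byte JMP rel8 with a zero
 * displacement sitting at IP=0xfffe.
 */
#if 0
    uint16_t const uIp     = 0xfffe;                          /* assumed current IP */
    uint8_t  const cbInstr = 2;                               /* JMP rel8 */
    int8_t   const offRel  = 0;
    uint16_t const uNewIp  = uIp + cbInstr + (int16_t)offRel; /* wraps around to 0x0000, then checked against the CS limit */
#endif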
5046
5047/**
5048 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5049 *
5050 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5051 * segment limit.
5052 *
5053 * @returns Strict VBox status code.
5054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5055 * @param cbInstr Instruction size.
5056 * @param offNextInstr The offset of the next instruction.
5057 */
5058VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
5059{
5060 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5061
5062 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
5063 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
5064 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
5065 pVCpu->cpum.GstCtx.rip = uNewIp;
5066 else
5067 return iemRaiseGeneralProtectionFault0(pVCpu);
5068
5069#ifndef IEM_WITH_CODE_TLB
5070 /* Flush the prefetch buffer. */
5071 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5072#endif
5073
5074 /*
5075 * Clear RF and finish the instruction (maybe raise #DB).
5076 */
5077 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5078}
5079
5080
5081/**
5082 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5083 *
5084 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5085 * segment limit.
5086 *
5087 * @returns Strict VBox status code.
5088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5089 * @param cbInstr Instruction size.
5090 * @param offNextInstr The offset of the next instruction.
5091 * @param enmEffOpSize Effective operand size.
5092 */
5093VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5094 IEMMODE enmEffOpSize) RT_NOEXCEPT
5095{
5096 if (enmEffOpSize == IEMMODE_32BIT)
5097 {
5098 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
5099
5100 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
5101 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
5102 pVCpu->cpum.GstCtx.rip = uNewEip;
5103 else
5104 return iemRaiseGeneralProtectionFault0(pVCpu);
5105 }
5106 else
5107 {
5108 Assert(enmEffOpSize == IEMMODE_64BIT);
5109
5110 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
5111 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
5112 pVCpu->cpum.GstCtx.rip = uNewRip;
5113 else
5114 return iemRaiseGeneralProtectionFault0(pVCpu);
5115 }
5116
5117#ifndef IEM_WITH_CODE_TLB
5118 /* Flush the prefetch buffer. */
5119 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5120#endif
5121
5122 /*
5123 * Clear RF and finish the instruction (maybe raise #DB).
5124 */
5125 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
5126}
5127
5128/** @} */
5129
5130
5131/** @name FPU access and helpers.
5132 *
5133 * @{
5134 */
5135
5136/**
5137 * Updates the x87.DS and FPUDP registers.
5138 *
5139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5140 * @param pFpuCtx The FPU context.
5141 * @param iEffSeg The effective segment register.
5142 * @param GCPtrEff The effective address relative to @a iEffSeg.
5143 */
5144DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
5145{
5146 RTSEL sel;
5147 switch (iEffSeg)
5148 {
5149 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
5150 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
5151 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
5152 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
5153 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
5154 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
5155 default:
5156 AssertMsgFailed(("%d\n", iEffSeg));
5157 sel = pVCpu->cpum.GstCtx.ds.Sel;
5158 }
5159    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
5160 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5161 {
5162 pFpuCtx->DS = 0;
5163 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
5164 }
5165 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
5166 {
5167 pFpuCtx->DS = sel;
5168 pFpuCtx->FPUDP = GCPtrEff;
5169 }
5170 else
5171 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
5172}
5173
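/*
 * Editor's note: illustrative sketch, not part of the original source and never
 * compiled.  In real and V8086 mode the worker above stores the linear address
 * in FPUDP and zeroes the DS field.  Assumed values: DS=0x1234 and an effective
 * offset of 0x10.
 */
#if 0
    uint16_t const uSel   = 0x1234;                          /* assumed segment selector value */
    uint32_t const offEff = 0x10;                            /* assumed effective address */
    uint32_t const uFpuDp = offEff + ((uint32_t)uSel << 4);  /* = 0x12350, the linear address */
#endif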
5174
5175/**
5176 * Rotates the stack registers in the push direction.
5177 *
5178 * @param pFpuCtx The FPU context.
5179 * @remarks This is a complete waste of time, but fxsave stores the registers in
5180 * stack order.
5181 */
5182DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
5183{
5184 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
5185 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
5186 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
5187 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
5188 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
5189 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
5190 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
5191 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
5192 pFpuCtx->aRegs[0].r80 = r80Tmp;
5193}
5194
5195
5196/**
5197 * Rotates the stack registers in the pop direction.
5198 *
5199 * @param pFpuCtx The FPU context.
5200 * @remarks This is a complete waste of time, but fxsave stores the registers in
5201 * stack order.
5202 */
5203DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
5204{
5205 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
5206 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
5207 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
5208 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
5209 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
5210 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
5211 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
5212 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
5213 pFpuCtx->aRegs[7].r80 = r80Tmp;
5214}
5215
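/*
 * Editor's note: illustrative sketch, not part of the original source and never
 * compiled.  Because the FXSAVE image keeps the registers in stack order
 * (aRegs[0] is always ST(0)), the callers below stage a pushed value in
 * aRegs[7] and then rotate, so that it ends up in aRegs[0] as the new ST(0)
 * while the old ST(0) moves to aRegs[1], and so on.  r80Value is a made-up
 * name for the value being pushed.
 */
#if 0
    pFpuCtx->aRegs[7].r80 = r80Value;   /* stage the value in the slot that wraps around... */
    iemFpuRotateStackPush(pFpuCtx);     /* ...into aRegs[0], i.e. the new ST(0) */
#endif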
5216
5217/**
5218 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
5219 * exception prevents it.
5220 *
5221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5222 * @param pResult The FPU operation result to push.
5223 * @param pFpuCtx The FPU context.
5224 */
5225static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5226{
5227 /* Update FSW and bail if there are pending exceptions afterwards. */
5228 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5229 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5230 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5231 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5232 {
5233        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5234 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
5235 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5236 pFpuCtx->FSW = fFsw;
5237 return;
5238 }
5239
5240 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5241 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5242 {
5243 /* All is fine, push the actual value. */
5244 pFpuCtx->FTW |= RT_BIT(iNewTop);
5245 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
5246 }
5247 else if (pFpuCtx->FCW & X86_FCW_IM)
5248 {
5249 /* Masked stack overflow, push QNaN. */
5250 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5251 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5252 }
5253 else
5254 {
5255 /* Raise stack overflow, don't push anything. */
5256 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5257 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5258 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5259 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5260 return;
5261 }
5262
5263 fFsw &= ~X86_FSW_TOP_MASK;
5264 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5265 pFpuCtx->FSW = fFsw;
5266
5267 iemFpuRotateStackPush(pFpuCtx);
5268 RT_NOREF(pVCpu);
5269}
5270
5271
5272/**
5273 * Stores a result in a FPU register and updates the FSW and FTW.
5274 *
5275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5276 * @param pFpuCtx The FPU context.
5277 * @param pResult The result to store.
5278 * @param iStReg Which FPU register to store it in.
5279 */
5280static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
5281{
5282 Assert(iStReg < 8);
5283 uint16_t fNewFsw = pFpuCtx->FSW;
5284 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
5285 fNewFsw &= ~X86_FSW_C_MASK;
5286 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5287 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5288 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5289 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5290 pFpuCtx->FSW = fNewFsw;
5291 pFpuCtx->FTW |= RT_BIT(iReg);
5292 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
5293 RT_NOREF(pVCpu);
5294}
5295
5296
5297/**
5298 * Only updates the FPU status word (FSW) with the result of the current
5299 * instruction.
5300 *
5301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5302 * @param pFpuCtx The FPU context.
5303 * @param u16FSW The FSW output of the current instruction.
5304 */
5305static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
5306{
5307 uint16_t fNewFsw = pFpuCtx->FSW;
5308 fNewFsw &= ~X86_FSW_C_MASK;
5309 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
5310 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5311        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
5312 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
5313 pFpuCtx->FSW = fNewFsw;
5314 RT_NOREF(pVCpu);
5315}
5316
5317
5318/**
5319 * Pops one item off the FPU stack if no pending exception prevents it.
5320 *
5321 * @param pFpuCtx The FPU context.
5322 */
5323static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5324{
5325 /* Check pending exceptions. */
5326 uint16_t uFSW = pFpuCtx->FSW;
5327 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5328 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5329 return;
5330
5331    /* TOP++ (pop one item). */
5332 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
5333 uFSW &= ~X86_FSW_TOP_MASK;
5334 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
5335 pFpuCtx->FSW = uFSW;
5336
5337 /* Mark the previous ST0 as empty. */
5338 iOldTop >>= X86_FSW_TOP_SHIFT;
5339 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5340
5341 /* Rotate the registers. */
5342 iemFpuRotateStackPop(pFpuCtx);
5343}
5344
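/*
 * Editor's note: illustrative sketch, not part of the original source and never
 * compiled.  Two details the push/pop helpers above rely on, shown with assumed
 * values: the TOP field is only 3 bits wide, so adding 7 decrements it and
 * adding 9 (or 1) increments it modulo 8; and the FSW exception bits IE/DE/ZE
 * occupy the same bit positions as the FCW mask bits IM/DM/ZM, which is what
 * makes the "unmasked pending exception" test a plain AND.
 */
#if 0
    uint16_t const iTop     = 0;                                  /* assumed current TOP */
    uint16_t const iPushTop = (iTop + 7) & X86_FSW_TOP_SMASK;     /* = 7, i.e. TOP - 1 modulo 8 */
    uint16_t const iPopTop  = (iPushTop + 9) & X86_FSW_TOP_SMASK; /* = 0, i.e. TOP + 1 modulo 8 */
    bool     const fPending = RT_BOOL(  (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
                                      & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)));
#endif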
5345
5346/**
5347 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5348 *
5349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5350 * @param pResult The FPU operation result to push.
5351 * @param uFpuOpcode The FPU opcode value.
5352 */
5353void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5354{
5355 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5356 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5357 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5358}
5359
5360
5361/**
5362 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5363 * and sets FPUDP and FPUDS.
5364 *
5365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5366 * @param pResult The FPU operation result to push.
5367 * @param iEffSeg The effective segment register.
5368 * @param GCPtrEff The effective address relative to @a iEffSeg.
5369 * @param uFpuOpcode The FPU opcode value.
5370 */
5371void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5372 uint16_t uFpuOpcode) RT_NOEXCEPT
5373{
5374 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5375 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5376 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5377 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5378}
5379
5380
5381/**
5382 * Replace ST0 with the first value and push the second onto the FPU stack,
5383 * unless a pending exception prevents it.
5384 *
5385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5386 * @param pResult The FPU operation result to store and push.
5387 * @param uFpuOpcode The FPU opcode value.
5388 */
5389void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5390{
5391 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5392 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5393
5394 /* Update FSW and bail if there are pending exceptions afterwards. */
5395 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5396 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5397 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5398 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5399 {
5400 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5401 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5402 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5403 pFpuCtx->FSW = fFsw;
5404 return;
5405 }
5406
5407 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5408 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5409 {
5410 /* All is fine, push the actual value. */
5411 pFpuCtx->FTW |= RT_BIT(iNewTop);
5412 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5413 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5414 }
5415 else if (pFpuCtx->FCW & X86_FCW_IM)
5416 {
5417 /* Masked stack overflow, push QNaN. */
5418 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5419 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5420 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5421 }
5422 else
5423 {
5424 /* Raise stack overflow, don't push anything. */
5425 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5426 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5427 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5428 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5429 return;
5430 }
5431
5432 fFsw &= ~X86_FSW_TOP_MASK;
5433 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5434 pFpuCtx->FSW = fFsw;
5435
5436 iemFpuRotateStackPush(pFpuCtx);
5437}
5438
5439
5440/**
5441 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5442 * FOP.
5443 *
5444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5445 * @param pResult The result to store.
5446 * @param iStReg Which FPU register to store it in.
5447 * @param uFpuOpcode The FPU opcode value.
5448 */
5449void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5450{
5451 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5452 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5453 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5454}
5455
5456
5457/**
5458 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5459 * FOP, and then pops the stack.
5460 *
5461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5462 * @param pResult The result to store.
5463 * @param iStReg Which FPU register to store it in.
5464 * @param uFpuOpcode The FPU opcode value.
5465 */
5466void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5467{
5468 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5469 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5470 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5471 iemFpuMaybePopOne(pFpuCtx);
5472}
5473
5474
5475/**
5476 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5477 * FPUDP, and FPUDS.
5478 *
5479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5480 * @param pResult The result to store.
5481 * @param iStReg Which FPU register to store it in.
5482 * @param iEffSeg The effective memory operand selector register.
5483 * @param GCPtrEff The effective memory operand offset.
5484 * @param uFpuOpcode The FPU opcode value.
5485 */
5486void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5487 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5488{
5489 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5490 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5491 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5492 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5493}
5494
5495
5496/**
5497 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5498 * FPUDP, and FPUDS, and then pops the stack.
5499 *
5500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5501 * @param pResult The result to store.
5502 * @param iStReg Which FPU register to store it in.
5503 * @param iEffSeg The effective memory operand selector register.
5504 * @param GCPtrEff The effective memory operand offset.
5505 * @param uFpuOpcode The FPU opcode value.
5506 */
5507void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5508 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5509{
5510 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5511 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5512 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5513 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5514 iemFpuMaybePopOne(pFpuCtx);
5515}
5516
5517
5518/**
5519 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5520 *
5521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5522 * @param uFpuOpcode The FPU opcode value.
5523 */
5524void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5525{
5526 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5527 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5528}
5529
5530
5531/**
5532 * Updates the FSW, FOP, FPUIP, and FPUCS.
5533 *
5534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5535 * @param u16FSW The FSW from the current instruction.
5536 * @param uFpuOpcode The FPU opcode value.
5537 */
5538void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5539{
5540 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5541 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5542 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5543}
5544
5545
5546/**
5547 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5548 *
5549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5550 * @param u16FSW The FSW from the current instruction.
5551 * @param uFpuOpcode The FPU opcode value.
5552 */
5553void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5554{
5555 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5556 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5557 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5558 iemFpuMaybePopOne(pFpuCtx);
5559}
5560
5561
5562/**
5563 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5564 *
5565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5566 * @param u16FSW The FSW from the current instruction.
5567 * @param iEffSeg The effective memory operand selector register.
5568 * @param GCPtrEff The effective memory operand offset.
5569 * @param uFpuOpcode The FPU opcode value.
5570 */
5571void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5572{
5573 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5574 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5575 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5576 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5577}
5578
5579
5580/**
5581 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5582 *
5583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5584 * @param u16FSW The FSW from the current instruction.
5585 * @param uFpuOpcode The FPU opcode value.
5586 */
5587void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5588{
5589 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5590 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5591 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5592 iemFpuMaybePopOne(pFpuCtx);
5593 iemFpuMaybePopOne(pFpuCtx);
5594}
5595
5596
5597/**
5598 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5599 *
5600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5601 * @param u16FSW The FSW from the current instruction.
5602 * @param iEffSeg The effective memory operand selector register.
5603 * @param GCPtrEff The effective memory operand offset.
5604 * @param uFpuOpcode The FPU opcode value.
5605 */
5606void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5607 uint16_t uFpuOpcode) RT_NOEXCEPT
5608{
5609 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5610 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5611 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5612 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5613 iemFpuMaybePopOne(pFpuCtx);
5614}
5615
5616
5617/**
5618 * Worker routine for raising an FPU stack underflow exception.
5619 *
5620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5621 * @param pFpuCtx The FPU context.
5622 * @param iStReg The stack register being accessed.
5623 */
5624static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5625{
5626 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5627 if (pFpuCtx->FCW & X86_FCW_IM)
5628 {
5629 /* Masked underflow. */
5630 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5631 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5632 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5633 if (iStReg != UINT8_MAX)
5634 {
5635 pFpuCtx->FTW |= RT_BIT(iReg);
5636 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5637 }
5638 }
5639 else
5640 {
5641 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5642 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5643 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5644 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5645 }
5646 RT_NOREF(pVCpu);
5647}
5648
5649
5650/**
5651 * Raises a FPU stack underflow exception.
5652 *
5653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5654 * @param iStReg The destination register that should be loaded
5655 * with QNaN if \#IS is not masked. Specify
5656 * UINT8_MAX if none (like for fcom).
5657 * @param uFpuOpcode The FPU opcode value.
5658 */
5659void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5660{
5661 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5662 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5663 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5664}
5665
5666
5667void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5668{
5669 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5670 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5671 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5672 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5673}
5674
5675
5676void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5677{
5678 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5679 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5680 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5681 iemFpuMaybePopOne(pFpuCtx);
5682}
5683
5684
5685void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5686 uint16_t uFpuOpcode) RT_NOEXCEPT
5687{
5688 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5689 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5690 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5691 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5692 iemFpuMaybePopOne(pFpuCtx);
5693}
5694
5695
5696void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5697{
5698 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5699 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5700 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5701 iemFpuMaybePopOne(pFpuCtx);
5702 iemFpuMaybePopOne(pFpuCtx);
5703}
5704
5705
5706void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5707{
5708 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5709 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5710
5711 if (pFpuCtx->FCW & X86_FCW_IM)
5712 {
5713        /* Masked underflow - Push QNaN. */
5714 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5715 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5716 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5717 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5718 pFpuCtx->FTW |= RT_BIT(iNewTop);
5719 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5720 iemFpuRotateStackPush(pFpuCtx);
5721 }
5722 else
5723 {
5724 /* Exception pending - don't change TOP or the register stack. */
5725 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5726 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5727 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5728 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5729 }
5730}
5731
5732
5733void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5734{
5735 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5736 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5737
5738 if (pFpuCtx->FCW & X86_FCW_IM)
5739 {
5740        /* Masked underflow - Push QNaN. */
5741 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5742 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5743 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5744 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5745 pFpuCtx->FTW |= RT_BIT(iNewTop);
5746 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5747 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5748 iemFpuRotateStackPush(pFpuCtx);
5749 }
5750 else
5751 {
5752 /* Exception pending - don't change TOP or the register stack. */
5753 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5754 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5755 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5756 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5757 }
5758}
5759
5760
5761/**
5762 * Worker routine for raising an FPU stack overflow exception on a push.
5763 *
5764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5765 * @param pFpuCtx The FPU context.
5766 */
5767static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5768{
5769 if (pFpuCtx->FCW & X86_FCW_IM)
5770 {
5771 /* Masked overflow. */
5772 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5773 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5774 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5775 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5776 pFpuCtx->FTW |= RT_BIT(iNewTop);
5777 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5778 iemFpuRotateStackPush(pFpuCtx);
5779 }
5780 else
5781 {
5782 /* Exception pending - don't change TOP or the register stack. */
5783 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5784 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5785 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5786 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5787 }
5788 RT_NOREF(pVCpu);
5789}
5790
5791
5792/**
5793 * Raises a FPU stack overflow exception on a push.
5794 *
5795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5796 * @param uFpuOpcode The FPU opcode value.
5797 */
5798void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5799{
5800 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5801 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5802 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5803}
5804
5805
5806/**
5807 * Raises a FPU stack overflow exception on a push with a memory operand.
5808 *
5809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5810 * @param iEffSeg The effective memory operand selector register.
5811 * @param GCPtrEff The effective memory operand offset.
5812 * @param uFpuOpcode The FPU opcode value.
5813 */
5814void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5815{
5816 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5817 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5818 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5819 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5820}
5821
5822/** @} */
5823
5824
5825/** @name Memory access.
5826 *
5827 * @{
5828 */
5829
5830#undef LOG_GROUP
5831#define LOG_GROUP LOG_GROUP_IEM_MEM
5832
5833/**
5834 * Updates the IEMCPU::cbWritten counter if applicable.
5835 *
5836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5837 * @param fAccess The access being accounted for.
5838 * @param cbMem The access size.
5839 */
5840DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5841{
5842 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5843 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5844 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5845}
5846
5847
5848/**
5849 * Applies the segment limit, base and attributes.
5850 *
5851 * This may raise a \#GP or \#SS.
5852 *
5853 * @returns VBox strict status code.
5854 *
5855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5856 * @param fAccess The kind of access which is being performed.
5857 * @param iSegReg The index of the segment register to apply.
5858 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5859 * TSS, ++).
5860 * @param cbMem The access size.
5861 * @param pGCPtrMem Pointer to the guest memory address to apply
5862 * segmentation to. Input and output parameter.
5863 */
5864VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5865{
5866 if (iSegReg == UINT8_MAX)
5867 return VINF_SUCCESS;
5868
5869 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5870 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5871 switch (IEM_GET_CPU_MODE(pVCpu))
5872 {
5873 case IEMMODE_16BIT:
5874 case IEMMODE_32BIT:
5875 {
5876 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5877 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5878
5879 if ( pSel->Attr.n.u1Present
5880 && !pSel->Attr.n.u1Unusable)
5881 {
5882 Assert(pSel->Attr.n.u1DescType);
5883 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5884 {
5885 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5886 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5887 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5888
5889 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5890 {
5891 /** @todo CPL check. */
5892 }
5893
5894 /*
5895 * There are two kinds of data selectors, normal and expand down.
5896 */
5897 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5898 {
5899 if ( GCPtrFirst32 > pSel->u32Limit
5900 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5901 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5902 }
5903 else
5904 {
5905 /*
5906 * The upper boundary is defined by the B bit, not the G bit!
5907 */
5908 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5909 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5910 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5911 }
5912 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5913 }
5914 else
5915 {
5916 /*
5917 * Code selectors can usually be used to read through; writing is
5918 * only permitted in real and V8086 mode.
5919 */
5920 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5921 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5922 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5923 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5924 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5925
5926 if ( GCPtrFirst32 > pSel->u32Limit
5927 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5928 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5929
5930 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5931 {
5932 /** @todo CPL check. */
5933 }
5934
5935 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5936 }
5937 }
5938 else
5939 return iemRaiseGeneralProtectionFault0(pVCpu);
5940 return VINF_SUCCESS;
5941 }
5942
5943 case IEMMODE_64BIT:
5944 {
5945 RTGCPTR GCPtrMem = *pGCPtrMem;
5946 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5947 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5948
5949 Assert(cbMem >= 1);
5950 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5951 return VINF_SUCCESS;
5952 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5953 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5954 return iemRaiseGeneralProtectionFault0(pVCpu);
5955 }
5956
5957 default:
5958 AssertFailedReturn(VERR_IEM_IPE_7);
5959 }
5960}
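
#if 0 /* Illustrative sketch, not built: a hypothetical caller applying ES segmentation to a
       * 4 byte data write before translation.  The helper adds the segment base (16/32-bit
       * modes) or canonical-checks the address (64-bit mode) and raises #GP/#SS itself; the
       * function and variable names here are made up for illustration. */
static VBOXSTRICTRC iemExampleApplyEsSegment(PVMCPUCC pVCpu, RTGCPTR GCPtrOff)
{
    RTGCPTR GCPtrMem = GCPtrOff; /* in: offset within ES; out: linear address */
    VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_W, X86_SREG_ES,
                                               4 /*cbMem*/, &GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;         /* limit/attribute violation already raised */
    /* GCPtrMem can now be fed to the translation and mapping code below. */
    return VINF_SUCCESS;
}
#endif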
5961
5962
5963/**
5964 * Translates a virtual address to a physical address and checks if we
5965 * can access the page as specified.
5966 *
5967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5968 * @param GCPtrMem The virtual address.
5969 * @param cbAccess The access size, for raising \#PF correctly for
5970 * FXSAVE and such.
5971 * @param fAccess The intended access.
5972 * @param pGCPhysMem Where to return the physical address.
5973 */
5974VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5975 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5976{
5977 /** @todo Need a different PGM interface here. We're currently using
5978 * generic / REM interfaces. this won't cut it for R0. */
5979 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5980 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5981 * here. */
5982 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5983 PGMPTWALKFAST WalkFast;
5984 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
5985 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
5986 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
5987 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
5988 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
5989 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
5990 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5991 fQPage |= PGMQPAGE_F_USER_MODE;
5992 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
5993 if (RT_SUCCESS(rc))
5994 {
5995 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
5996
5997 /* If the page is writable and does not have the no-exec bit set, all
5998 access is allowed. Otherwise we'll have to check more carefully... */
5999 Assert( (WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
6000 || ( ( !(fAccess & IEM_ACCESS_TYPE_WRITE)
6001 || (WalkFast.fEffective & X86_PTE_RW)
6002 || ( ( IEM_GET_CPL(pVCpu) != 3
6003 || (fAccess & IEM_ACCESS_WHAT_SYS))
6004 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)) )
6005 && ( (WalkFast.fEffective & X86_PTE_US)
6006 || IEM_GET_CPL(pVCpu) != 3
6007 || (fAccess & IEM_ACCESS_WHAT_SYS) )
6008 && ( !(fAccess & IEM_ACCESS_TYPE_EXEC)
6009 || !(WalkFast.fEffective & X86_PTE_PAE_NX)
6010 || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
6011 )
6012 );
6013
6014 /* PGMGstQueryPageFast sets the A & D bits. */
6015 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6016 Assert(!(~WalkFast.fEffective & (fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A)));
6017
6018 *pGCPhysMem = WalkFast.GCPhys;
6019 return VINF_SUCCESS;
6020 }
6021
6022 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6023 /** @todo Check unassigned memory in unpaged mode. */
6024#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6025 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6026 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6027#endif
6028 *pGCPhysMem = NIL_RTGCPHYS;
6029 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
6030}
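
#if 0 /* Illustrative sketch, not built: a made-up caller resolving the guest-physical address
       * of an 8 byte data read.  On failure the helper has already raised #PF (or triggered an
       * EPT VM-exit when nested paging is involved), so the caller just propagates the status. */
static VBOXSTRICTRC iemExampleTranslateRead(PVMCPUCC pVCpu, RTGCPTR GCPtrMem)
{
    RTGCPHYS     GCPhysMem = NIL_RTGCPHYS;
    VBOXSTRICTRC rcStrict  = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, 8 /*cbAccess*/,
                                                               IEM_ACCESS_DATA_R, &GCPhysMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* GCPhysMem holds the translated address; the page offset of GCPtrMem is preserved. */
    return VINF_SUCCESS;
}
#endif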
6031
6032#if 0 /*unused*/
6033/**
6034 * Looks up a memory mapping entry.
6035 *
6036 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
6037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6038 * @param pvMem The memory address.
6039 * @param fAccess The kind of access to look up.
6040 */
6041DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
6042{
6043 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6044 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
6045 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
6046 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6047 return 0;
6048 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
6049 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6050 return 1;
6051 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
6052 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
6053 return 2;
6054 return VERR_NOT_FOUND;
6055}
6056#endif
6057
6058/**
6059 * Finds a free memmap entry when using iNextMapping doesn't work.
6060 *
6061 * @returns Memory mapping index, 1024 on failure.
6062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6063 */
6064static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
6065{
6066 /*
6067 * The easy case.
6068 */
6069 if (pVCpu->iem.s.cActiveMappings == 0)
6070 {
6071 pVCpu->iem.s.iNextMapping = 1;
6072 return 0;
6073 }
6074
6075 /* There should be enough mappings for all instructions. */
6076 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
6077
6078 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
6079 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
6080 return i;
6081
6082 AssertFailedReturn(1024);
6083}
6084
6085
6086/**
6087 * Commits a bounce buffer that needs writing back and unmaps it.
6088 *
6089 * @returns Strict VBox status code.
6090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6091 * @param iMemMap The index of the buffer to commit.
6092 * @param fPostponeFail Whether we can postpone write failures to ring-3.
6093 * Always false in ring-3, obviously.
6094 */
6095static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
6096{
6097 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
6098 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
6099#ifdef IN_RING3
6100 Assert(!fPostponeFail);
6101 RT_NOREF_PV(fPostponeFail);
6102#endif
6103
6104 /*
6105 * Do the writing.
6106 */
6107 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6108 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
6109 {
6110 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
6111 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6112 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6113 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6114 {
6115 /*
6116 * Carefully and efficiently dealing with access handler return
6118 * codes makes this a little bloated.
6118 */
6119 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
6120 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6121 pbBuf,
6122 cbFirst,
6123 PGMACCESSORIGIN_IEM);
6124 if (rcStrict == VINF_SUCCESS)
6125 {
6126 if (cbSecond)
6127 {
6128 rcStrict = PGMPhysWrite(pVM,
6129 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6130 pbBuf + cbFirst,
6131 cbSecond,
6132 PGMACCESSORIGIN_IEM);
6133 if (rcStrict == VINF_SUCCESS)
6134 { /* nothing */ }
6135 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6136 {
6137 LogEx(LOG_GROUP_IEM,
6138 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
6139 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6140 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6141 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6142 }
6143#ifndef IN_RING3
6144 else if (fPostponeFail)
6145 {
6146 LogEx(LOG_GROUP_IEM,
6147 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6148 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6149 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6150 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6151 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6152 return iemSetPassUpStatus(pVCpu, rcStrict);
6153 }
6154#endif
6155 else
6156 {
6157 LogEx(LOG_GROUP_IEM,
6158 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6160 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6161 return rcStrict;
6162 }
6163 }
6164 }
6165 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6166 {
6167 if (!cbSecond)
6168 {
6169 LogEx(LOG_GROUP_IEM,
6170 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
6171 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6172 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6173 }
6174 else
6175 {
6176 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
6177 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6178 pbBuf + cbFirst,
6179 cbSecond,
6180 PGMACCESSORIGIN_IEM);
6181 if (rcStrict2 == VINF_SUCCESS)
6182 {
6183 LogEx(LOG_GROUP_IEM,
6184 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
6185 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6186 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6187 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6188 }
6189 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6190 {
6191 LogEx(LOG_GROUP_IEM,
6192 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
6193 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6194 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6195 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6196 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6197 }
6198#ifndef IN_RING3
6199 else if (fPostponeFail)
6200 {
6201 LogEx(LOG_GROUP_IEM,
6202 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6203 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6204 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6205 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
6206 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6207 return iemSetPassUpStatus(pVCpu, rcStrict);
6208 }
6209#endif
6210 else
6211 {
6212 LogEx(LOG_GROUP_IEM,
6213 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6214 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6215 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
6216 return rcStrict2;
6217 }
6218 }
6219 }
6220#ifndef IN_RING3
6221 else if (fPostponeFail)
6222 {
6223 LogEx(LOG_GROUP_IEM,
6224 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
6225 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6226 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6227 if (!cbSecond)
6228 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
6229 else
6230 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
6231 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
6232 return iemSetPassUpStatus(pVCpu, rcStrict);
6233 }
6234#endif
6235 else
6236 {
6237 LogEx(LOG_GROUP_IEM,
6238 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6239 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
6240 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6241 return rcStrict;
6242 }
6243 }
6244 else
6245 {
6246 /*
6247 * No access handlers, much simpler.
6248 */
6249 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
6250 if (RT_SUCCESS(rc))
6251 {
6252 if (cbSecond)
6253 {
6254 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
6255 if (RT_SUCCESS(rc))
6256 { /* likely */ }
6257 else
6258 {
6259 LogEx(LOG_GROUP_IEM,
6260 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
6261 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
6262 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
6263 return rc;
6264 }
6265 }
6266 }
6267 else
6268 {
6269 LogEx(LOG_GROUP_IEM,
6270 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
6271 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
6272 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
6273 return rc;
6274 }
6275 }
6276 }
6277
6278#if defined(IEM_LOG_MEMORY_WRITES)
6279 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
6280 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
6281 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
6282 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
6283 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
6284 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
6285
6286 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
6287 g_cbIemWrote = cbWrote;
6288 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
6289#endif
6290
6291 /*
6292 * Free the mapping entry.
6293 */
6294 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6295 Assert(pVCpu->iem.s.cActiveMappings != 0);
6296 pVCpu->iem.s.cActiveMappings--;
6297 return VINF_SUCCESS;
6298}
6299
6300
6301/**
6302 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
6303 */
6304DECL_FORCE_INLINE(uint32_t)
6305iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
6306{
6307 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
6308 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6309 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6310 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
6311}
6312
6313
6314/**
6315 * iemMemMap worker that deals with a request crossing pages.
6316 */
6317static VBOXSTRICTRC
6318iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6319 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6320{
6321 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
6322 Assert(cbMem <= GUEST_PAGE_SIZE);
6323
6324 /*
6325 * Do the address translations.
6326 */
6327 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6328 RTGCPHYS GCPhysFirst;
6329 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6330 if (rcStrict != VINF_SUCCESS)
6331 return rcStrict;
6332 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6333
6334 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6335 RTGCPHYS GCPhysSecond;
6336 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6337 cbSecondPage, fAccess, &GCPhysSecond);
6338 if (rcStrict != VINF_SUCCESS)
6339 return rcStrict;
6340 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6341 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6342
6343 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6344
6345 /*
6346 * Check for data breakpoints.
6347 */
6348 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
6349 { /* likely */ }
6350 else
6351 {
6352 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
6353 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6354 cbSecondPage, fAccess);
6355 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6356 if (fDataBps > 1)
6357 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6358 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6359 }
6360
6361 /*
6362 * Read in the current memory content if it's a read, execute or partial
6363 * write access.
6364 */
6365 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6366
6367 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6368 {
6369 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6370 {
6371 /*
6372 * Must carefully deal with access handler status codes here,
6373 * makes the code a bit bloated.
6374 */
6375 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6376 if (rcStrict == VINF_SUCCESS)
6377 {
6378 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6379 if (rcStrict == VINF_SUCCESS)
6380 { /*likely */ }
6381 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6382 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6383 else
6384 {
6385 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6386 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6387 return rcStrict;
6388 }
6389 }
6390 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6391 {
6392 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6393 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6394 {
6395 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6396 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6397 }
6398 else
6399 {
6400 LogEx(LOG_GROUP_IEM,
6401 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6402 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6403 return rcStrict2;
6404 }
6405 }
6406 else
6407 {
6408 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6409 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6410 return rcStrict;
6411 }
6412 }
6413 else
6414 {
6415 /*
6416 * No informational status codes here, much more straightforward.
6417 */
6418 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6419 if (RT_SUCCESS(rc))
6420 {
6421 Assert(rc == VINF_SUCCESS);
6422 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6423 if (RT_SUCCESS(rc))
6424 Assert(rc == VINF_SUCCESS);
6425 else
6426 {
6427 LogEx(LOG_GROUP_IEM,
6428 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6429 return rc;
6430 }
6431 }
6432 else
6433 {
6434 LogEx(LOG_GROUP_IEM,
6435 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6436 return rc;
6437 }
6438 }
6439 }
6440#ifdef VBOX_STRICT
6441 else
6442 memset(pbBuf, 0xcc, cbMem);
6443 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6444 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6445#endif
6446 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6447
6448 /*
6449 * Commit the bounce buffer entry.
6450 */
6451 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6453 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6454 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6455 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6456 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6457 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6458 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6459 pVCpu->iem.s.cActiveMappings++;
6460
6461 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6462 *ppvMem = pbBuf;
6463 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6464 return VINF_SUCCESS;
6465}
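
#if 0 /* Illustrative sketch, not built: the first/second page split computed above, shown for
       * a made-up 8 byte access at linear address 0x1ffd with 4 KiB guest pages:
       *   cbFirstPage  = 0x1000 - (0x1ffd & 0xfff) = 3 bytes ending the first page,
       *   cbSecondPage = 8 - 3                     = 5 bytes starting the second page. */
static void iemExampleCrossPageSplit(RTGCPTR GCPtrFirst, size_t cbMem)
{
    uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
    RT_NOREF(cbSecondPage);
}
#endif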
6466
6467
6468/**
6469 * iemMemMap worker that deals with iemMemPageMap failures.
6470 */
6471static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6472 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6473{
6474 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
6475
6476 /*
6477 * Filter out conditions we can handle and the ones which shouldn't happen.
6478 */
6479 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6480 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6481 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6482 {
6483 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6484 return rcMap;
6485 }
6486 pVCpu->iem.s.cPotentialExits++;
6487
6488 /*
6489 * Read in the current memory content if it's a read, execute or partial
6490 * write access.
6491 */
6492 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6493 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6494 {
6495 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6496 memset(pbBuf, 0xff, cbMem);
6497 else
6498 {
6499 int rc;
6500 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6501 {
6502 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6503 if (rcStrict == VINF_SUCCESS)
6504 { /* nothing */ }
6505 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6506 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6507 else
6508 {
6509 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6510 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6511 return rcStrict;
6512 }
6513 }
6514 else
6515 {
6516 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6517 if (RT_SUCCESS(rc))
6518 { /* likely */ }
6519 else
6520 {
6521 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6522 GCPhysFirst, rc));
6523 return rc;
6524 }
6525 }
6526 }
6527 }
6528#ifdef VBOX_STRICT
6529 else
6530 memset(pbBuf, 0xcc, cbMem);
6531#endif
6532#ifdef VBOX_STRICT
6533 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6534 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6535#endif
6536
6537 /*
6538 * Commit the bounce buffer entry.
6539 */
6540 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6541 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6542 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6543 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6544 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6545 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6546 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6547 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6548 pVCpu->iem.s.cActiveMappings++;
6549
6550 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6551 *ppvMem = pbBuf;
6552 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6553 return VINF_SUCCESS;
6554}
6555
6556
6557
6558/**
6559 * Maps the specified guest memory for the given kind of access.
6560 *
6561 * This may be using bounce buffering of the memory if it's crossing a page
6562 * boundary or if there is an access handler installed for any of it. Because
6563 * of lock prefix guarantees, we're in for some extra clutter when this
6564 * happens.
6565 *
6566 * This may raise a \#GP, \#SS, \#PF or \#AC.
6567 *
6568 * @returns VBox strict status code.
6569 *
6570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6571 * @param ppvMem Where to return the pointer to the mapped memory.
6572 * @param pbUnmapInfo Where to return unmap info to be passed to
6573 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6574 * done.
6575 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6576 * 8, 12, 16, 32 or 512. When used by string operations
6577 * it can be up to a page.
6578 * @param iSegReg The index of the segment register to use for this
6579 * access. The base and limits are checked. Use UINT8_MAX
6580 * to indicate that no segmentation is required (for IDT,
6581 * GDT and LDT accesses).
6582 * @param GCPtrMem The address of the guest memory.
6583 * @param fAccess How the memory is being accessed. The
6584 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6585 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6586 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6587 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6588 * set.
6589 * @param uAlignCtl Alignment control:
6590 * - Bits 15:0 is the alignment mask.
6591 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6592 * IEM_MEMMAP_F_ALIGN_SSE, and
6593 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6594 * Pass zero to skip alignment.
6595 */
6596VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6597 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6598{
6599 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapNoJmp);
6600
6601 /*
6602 * Check the input and figure out which mapping entry to use.
6603 */
6604 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6605 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6606 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6607 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6608 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6609
6610 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6611 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6612 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6613 {
6614 iMemMap = iemMemMapFindFree(pVCpu);
6615 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6616 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6617 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6618 pVCpu->iem.s.aMemMappings[2].fAccess),
6619 VERR_IEM_IPE_9);
6620 }
6621
6622 /*
6623 * Map the memory, checking that we can actually access it. If something
6624 * slightly complicated happens, fall back on bounce buffering.
6625 */
6626 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6627 if (rcStrict == VINF_SUCCESS)
6628 { /* likely */ }
6629 else
6630 return rcStrict;
6631
6632 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6633 { /* likely */ }
6634 else
6635 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6636
6637 /*
6638 * Alignment check.
6639 */
6640 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6641 { /* likelyish */ }
6642 else
6643 {
6644 /* Misaligned access. */
6645 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6646 {
6647 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6648 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6649 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6650 {
6651 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6652
6653 if (!iemMemAreAlignmentChecksEnabled(pVCpu))
6654 { /* likely */ }
6655 else
6656 return iemRaiseAlignmentCheckException(pVCpu);
6657 }
6658 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6659 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6660 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6661 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6662 * that's what FXSAVE does on a 10980xe. */
6663 && iemMemAreAlignmentChecksEnabled(pVCpu))
6664 return iemRaiseAlignmentCheckException(pVCpu);
6665 else
6666 return iemRaiseGeneralProtectionFault0(pVCpu);
6667 }
6668
6669#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6670 /* If the access is atomic there are host platform alignment restrictions
6671 we need to conform with. */
6672 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6673# if defined(RT_ARCH_AMD64)
6674 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6675# elif defined(RT_ARCH_ARM64)
6676 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6677# else
6678# error port me
6679# endif
6680 )
6681 { /* okay */ }
6682 else
6683 {
6684 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6685 pVCpu->iem.s.cMisalignedAtomics += 1;
6686 return VINF_EM_EMULATE_SPLIT_LOCK;
6687 }
6688#endif
6689 }
6690
6691#ifdef IEM_WITH_DATA_TLB
6692 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6693
6694 /*
6695 * Get the TLB entry for this page and check PT flags.
6696 *
6697 * We reload the TLB entry if we need to set the dirty bit (accessed
6698 * should in theory always be set).
6699 */
6700 uint8_t *pbMem = NULL;
6701 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
6702 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
6703 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0);
6704 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
6705 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
6706 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
6707 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
6708 {
6709# ifdef IEM_WITH_TLB_STATISTICS
6710 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
6711# endif
6712
6713 /* If the page is either supervisor only or non-writable, we need to do
6714 more careful access checks. */
6715 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6716 {
6717 /* Write to read only memory? */
6718 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6719 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6720 && ( ( IEM_GET_CPL(pVCpu) == 3
6721 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6722 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6723 {
6724 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6725 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6726 }
6727
6728 /* Kernel memory accessed by userland? */
6729 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6730 && IEM_GET_CPL(pVCpu) == 3
6731 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6732 {
6733 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6734 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6735 }
6736 }
6737
6738 /* Look up the physical page info if necessary. */
6739 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6740# ifdef IN_RING3
6741 pbMem = pTlbe->pbMappingR3;
6742# else
6743 pbMem = NULL;
6744# endif
6745 else
6746 {
6747 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6748 { /* likely */ }
6749 else
6750 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6751 pTlbe->pbMappingR3 = NULL;
6752 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
6753 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6754 &pbMem, &pTlbe->fFlagsAndPhysRev);
6755 AssertRCReturn(rc, rc);
6756# ifdef IN_RING3
6757 pTlbe->pbMappingR3 = pbMem;
6758# endif
6759 }
6760 }
6761 else
6762 {
6763 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
6764
6765 /* This page table walking will set A bits as required by the access while performing the walk.
6766 ASSUMES these are set when the address is translated rather than on commit... */
6767 /** @todo testcase: check when A bits are actually set by the CPU for code. */
6768 PGMPTWALKFAST WalkFast;
6769 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
6770 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
6771 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
6772 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
6773 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
6774 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
6775 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6776 fQPage |= PGMQPAGE_F_USER_MODE;
6777 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
6778 if (RT_SUCCESS(rc))
6779 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
6780 else
6781 {
6782 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6783# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6784 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
6785 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6786# endif
6787 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6788 }
6789
6790 uint32_t fDataBps;
6791 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
6792 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
6793 {
6794 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
6795 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
6796 {
6797 pTlbe--;
6798 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
6799 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6800 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6801 IEMTLBTRACE_LOAD(pVCpu, GCPtrMem, true);
6802 }
6803 else
6804 {
6805 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
6806 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
6807 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
6808 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
6809 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, true);
6810 }
6811 }
6812 else
6813 {
6814 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
6815 to the page with the data access breakpoint armed on it to pass thru here. */
6816 if (fDataBps > 1)
6817 LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
6818 fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
6819 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
6820 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
6821 pTlbe->uTag = uTagNoRev;
6822 }
6823 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
6824 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
6825 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
6826 pTlbe->GCPhys = GCPhysPg;
6827 pTlbe->pbMappingR3 = NULL;
6828 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED));
6829 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6830 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) || !(fAccess & IEM_ACCESS_TYPE_WRITE));
6831 Assert( !(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6832 || IEM_GET_CPL(pVCpu) != 3
6833 || (fAccess & IEM_ACCESS_WHAT_SYS));
6834
6835 /* Resolve the physical address. */
6836 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
6837 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6838 &pbMem, &pTlbe->fFlagsAndPhysRev);
6839 AssertRCReturn(rc, rc);
6840# ifdef IN_RING3
6841 pTlbe->pbMappingR3 = pbMem;
6842# endif
6843 }
6844
6845 /*
6846 * Check the physical page level access and mapping.
6847 */
6848 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6849 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6850 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6851 { /* probably likely */ }
6852 else
6853 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6854 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6855 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6856 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6857 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6858 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6859
6860 if (pbMem)
6861 {
6862 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6863 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6864 fAccess |= IEM_ACCESS_NOT_LOCKED;
6865 }
6866 else
6867 {
6868 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6869 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6870 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6871 if (rcStrict != VINF_SUCCESS)
6872 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6873 }
6874
6875 void * const pvMem = pbMem;
6876
6877 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6878 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6879 if (fAccess & IEM_ACCESS_TYPE_READ)
6880 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6881
6882#else /* !IEM_WITH_DATA_TLB */
6883
6884 RTGCPHYS GCPhysFirst;
6885 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6886 if (rcStrict != VINF_SUCCESS)
6887 return rcStrict;
6888
6889 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6890 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6891 if (fAccess & IEM_ACCESS_TYPE_READ)
6892 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6893
6894 void *pvMem;
6895 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6896 if (rcStrict != VINF_SUCCESS)
6897 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6898
6899#endif /* !IEM_WITH_DATA_TLB */
6900
6901 /*
6902 * Fill in the mapping table entry.
6903 */
6904 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6905 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6906 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6907 pVCpu->iem.s.cActiveMappings += 1;
6908
6909 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6910 *ppvMem = pvMem;
6911 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6912 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6913 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6914
6915 return VINF_SUCCESS;
6916}
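
#if 0 /* Illustrative sketch, not built: the typical map/commit pattern around iemMemMap for a
       * made-up 4 byte data write.  Passing only an alignment mask in uAlignCtl (no
       * IEM_MEMMAP_F_ALIGN_GP) means a misaligned access yields #AC when alignment checks are
       * enabled, per the checks above; function and variable names are made up. */
static VBOXSTRICTRC iemExampleStoreU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t uValue)
{
    void    *pvMem      = NULL;
    uint8_t  bUnmapInfo = 0;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem, &bUnmapInfo, sizeof(uValue), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_W, 3 /*uAlignCtl: alignment mask only*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *(uint32_t *)pvMem = uValue;                    /* direct mapping or bounce buffer */
    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo); /* writes back bounce buffered data */
}
#endif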
6917
6918
6919/**
6920 * Commits the guest memory if bounce buffered and unmaps it.
6921 *
6922 * @returns Strict VBox status code.
6923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6924 * @param bUnmapInfo Unmap info set by iemMemMap.
6925 */
6926VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6927{
6928 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6929 AssertMsgReturn( (bUnmapInfo & 0x08)
6930 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6931 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6932 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6933 VERR_NOT_FOUND);
6934
6935 /* If it's bounce buffered, we may need to write back the buffer. */
6936 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6937 {
6938 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6939 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6940 }
6941 /* Otherwise unlock it. */
6942 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6943 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6944
6945 /* Free the entry. */
6946 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6947 Assert(pVCpu->iem.s.cActiveMappings != 0);
6948 pVCpu->iem.s.cActiveMappings--;
6949 return VINF_SUCCESS;
6950}
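
#if 0 /* Illustrative sketch, not built: the layout of the bUnmapInfo byte produced by iemMemMap
       * and checked above - bits 2:0 hold the mapping index, bit 3 (0x08) marks it as valid,
       * and bits 7:4 carry the IEM_ACCESS_TYPE_MASK portion of fAccess. */
static void iemExampleDecodeUnmapInfo(uint8_t bUnmapInfo)
{
    uintptr_t const iMemMap     = bUnmapInfo & 0x7;          /* index into aMemMappings */
    bool const      fValid      = RT_BOOL(bUnmapInfo & 0x08);
    uint32_t const  fAccessType = (uint32_t)bUnmapInfo >> 4; /* IEM_ACCESS_TYPE_XXX bits */
    RT_NOREF(iMemMap, fValid, fAccessType);
}
#endif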
6951
6952
6953/**
6954 * Rolls back the guest memory (conceptually only) and unmaps it.
6955 *
6956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6957 * @param bUnmapInfo Unmap info set by iemMemMap.
6958 */
6959void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6960{
6961 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6962 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
6963 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6964 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
6965 == ((unsigned)bUnmapInfo >> 4),
6966 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
6967
6968 /* Unlock it if necessary. */
6969 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6970 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6971
6972 /* Free the entry. */
6973 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6974 Assert(pVCpu->iem.s.cActiveMappings != 0);
6975 pVCpu->iem.s.cActiveMappings--;
6976}
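
#if 0 /* Illustrative sketch, not built: a made-up error path showing when the rollback variant
       * is used - nothing is written back, the mapping or page lock is simply released. */
static void iemExampleAbortStore(PVMCPUCC pVCpu, uint8_t bUnmapInfo)
{
    /* Some later check failed after iemMemMap succeeded; discard the pending write. */
    iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
}
#endif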
6977
6978#ifdef IEM_WITH_SETJMP
6979
6980/**
6981 * Maps the specified guest memory for the given kind of access, longjmp on
6982 * error.
6983 *
6984 * This may be using bounce buffering of the memory if it's crossing a page
6985 * boundary or if there is an access handler installed for any of it. Because
6986 * of lock prefix guarantees, we're in for some extra clutter when this
6987 * happens.
6988 *
6989 * This may raise a \#GP, \#SS, \#PF or \#AC.
6990 *
6991 * @returns Pointer to the mapped memory.
6992 *
6993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6994 * @param pbUnmapInfo Where to return unmap info to be passed to
6995 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6996 * iemMemCommitAndUnmapWoSafeJmp,
6997 * iemMemCommitAndUnmapRoSafeJmp,
6998 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6999 * when done.
7000 * @param cbMem The number of bytes to map. This is usually 1,
7001 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
7002 * string operations it can be up to a page.
7003 * @param iSegReg The index of the segment register to use for
7004 * this access. The base and limits are checked.
7005 * Use UINT8_MAX to indicate that no segmentation
7006 * is required (for IDT, GDT and LDT accesses).
7007 * @param GCPtrMem The address of the guest memory.
7008 * @param fAccess How the memory is being accessed. The
7009 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
7010 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
7011 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
7012 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
7013 * set.
7014 * @param uAlignCtl Alignment control:
7015 * - Bits 15:0 is the alignment mask.
7016 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
7017 * IEM_MEMMAP_F_ALIGN_SSE, and
7018 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
7019 * Pass zero to skip alignment.
7020 * @tparam a_fSafeCall Whether this is a call from a "safe" fallback function in
7021 * IEMAllMemRWTmpl.cpp.h (@c true) or a generic one that
7022 * needs counting as such in the statistics.
7023 */
7024template<bool a_fSafeCall = false>
7025static void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7026 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7027{
7028 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemMapJmp);
7029
7030 /*
7031 * Check the input, check segment access and adjust address
7032 * with segment base.
7033 */
7034 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
7035 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
7036 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7037
7038 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
7039 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7040 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7041
7042 /*
7043 * Alignment check.
7044 */
7045 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
7046 { /* likelyish */ }
7047 else
7048 {
7049 /* Misaligned access. */
7050 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7051 {
7052 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
7053 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
7054 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
7055 {
7056 AssertCompile(X86_CR0_AM == X86_EFL_AC);
7057
7058 if (iemMemAreAlignmentChecksEnabled(pVCpu))
7059 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7060 }
7061 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
7062 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
7063 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
7064 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
7065 * that's what FXSAVE does on a 10980xe. */
7066 && iemMemAreAlignmentChecksEnabled(pVCpu))
7067 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
7068 else
7069 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
7070 }
7071
7072#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
7073 /* If the access is atomic there are host platform alignment restrictions
7074 we need to conform with. */
7075 if ( !(fAccess & IEM_ACCESS_ATOMIC)
7076# if defined(RT_ARCH_AMD64)
7077 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
7078# elif defined(RT_ARCH_ARM64)
7079 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
7080# else
7081# error port me
7082# endif
7083 )
7084 { /* okay */ }
7085 else
7086 {
7087 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
7088 pVCpu->iem.s.cMisalignedAtomics += 1;
7089 IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
7090 }
7091#endif
7092 }
7093
7094 /*
7095 * Figure out which mapping entry to use.
7096 */
7097 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
7098 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7099 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
7100 {
7101 iMemMap = iemMemMapFindFree(pVCpu);
7102 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
7103 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
7104 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
7105 pVCpu->iem.s.aMemMappings[2].fAccess),
7106 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
7107 }
7108
7109 /*
7110 * Crossing a page boundary?
7111 */
7112 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
7113 { /* No (likely). */ }
7114 else
7115 {
7116 void *pvMem;
7117 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
7118 if (rcStrict == VINF_SUCCESS)
7119 return pvMem;
7120 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7121 }
7122
7123#ifdef IEM_WITH_DATA_TLB
7124 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
7125
7126 /*
7127 * Get the TLB entry for this page checking that it has the A & D bits
7128 * set as per fAccess flags.
7129 */
7130 /** @todo make the caller pass these in with fAccess. */
7131 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
7132 ? IEMTLBE_F_PT_NO_USER : 0;
7133 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
7134 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
7135 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
7136 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
7137 ? IEMTLBE_F_PT_NO_WRITE : 0)
7138 : 0;
7139 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
7140 uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
7141 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
7142 uint64_t const fTlbeAD = IEMTLBE_F_PT_NO_ACCESSED | (fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY);
7143 if ( ( pTlbe->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
7144 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) )
7145 || ( (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)
7146 && !(pTlbe->fFlagsAndPhysRev & fTlbeAD) ) )
7147 {
7148# ifdef IEM_WITH_TLB_STATISTICS
7149 if (a_fSafeCall)
7150 pVCpu->iem.s.DataTlb.cTlbSafeHits++;
7151 else
7152 pVCpu->iem.s.DataTlb.cTlbCoreHits++;
7153# endif
7154 }
7155 else
7156 {
7157 if (a_fSafeCall)
7158 pVCpu->iem.s.DataTlb.cTlbSafeMisses++;
7159 else
7160 pVCpu->iem.s.DataTlb.cTlbCoreMisses++;
7161
7162 /* This page table walking will set A and D bits as required by the
7163 access while performing the walk.
7164 ASSUMES these are set when the address is translated rather than on commit... */
7165 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7166 PGMPTWALKFAST WalkFast;
7167 AssertCompile(IEM_ACCESS_TYPE_READ == PGMQPAGE_F_READ);
7168 AssertCompile(IEM_ACCESS_TYPE_WRITE == PGMQPAGE_F_WRITE);
7169 AssertCompile(IEM_ACCESS_TYPE_EXEC == PGMQPAGE_F_EXECUTE);
7170 AssertCompile(X86_CR0_WP == PGMQPAGE_F_CR0_WP0);
7171 uint32_t fQPage = (fAccess & (PGMQPAGE_F_READ | IEM_ACCESS_TYPE_WRITE | PGMQPAGE_F_EXECUTE))
7172 | (((uint32_t)pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP) ^ X86_CR0_WP);
7173 if (IEM_GET_CPL(pVCpu) == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7174 fQPage |= PGMQPAGE_F_USER_MODE;
7175 int rc = PGMGstQueryPageFast(pVCpu, GCPtrMem, fQPage, &WalkFast);
7176 if (RT_SUCCESS(rc))
7177 Assert((WalkFast.fInfo & PGM_WALKINFO_SUCCEEDED) && WalkFast.fFailed == PGM_WALKFAIL_SUCCESS);
7178 else
7179 {
7180 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
7181# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7182 if (WalkFast.fFailed & PGM_WALKFAIL_EPT)
7183 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &WalkFast, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
7184# endif
7185 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
7186 }
7187
7188 uint32_t fDataBps;
7189 if ( RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
7190 || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
7191 {
7192 if ( !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
7193 || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
7194 {
7195 pTlbe--;
7196 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
7197 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7198 iemTlbLoadedLargePage<false>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7199 IEMTLBTRACE_LOAD(pVCpu, GCPtrMem, true);
7200 }
7201 else
7202 {
7203 if (a_fSafeCall)
7204 pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
7205 else
7206 pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
7207 pTlbe->uTag = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
7208 if (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE)
7209 iemTlbLoadedLargePage<true>(&pVCpu->iem.s.DataTlb, uTagNoRev, RT_BOOL(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE));
7210 IEMTLBTRACE_LOAD_GLOBAL(pVCpu, GCPtrMem, true);
7211 }
7212 }
7213 else
7214 {
7215 /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
7216 to the page with the data access breakpoint armed on it to pass thru here. */
7217 if (fDataBps > 1)
7218 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
7219 a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
7220 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
7221 pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
7222 pTlbe->uTag = uTagNoRev;
7223 }
7224 pTlbe->fFlagsAndPhysRev = (~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A) /* skipping NX */)
7225 | (WalkFast.fInfo & PGM_WALKINFO_BIG_PAGE);
7226 RTGCPHYS const GCPhysPg = WalkFast.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
7227 pTlbe->GCPhys = GCPhysPg;
7228 pTlbe->pbMappingR3 = NULL;
7229 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7230 Assert(!(pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE));
7231 Assert(!(pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER));
7232
7233 /* Resolve the physical address. */
7234 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_GCPHYS2PTR_MASK));
7235 uint8_t *pbMemFullLoad = NULL;
7236 rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhysPg, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7237 &pbMemFullLoad, &pTlbe->fFlagsAndPhysRev);
7238 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7239# ifdef IN_RING3
7240 pTlbe->pbMappingR3 = pbMemFullLoad;
7241# endif
7242 }
7243
7244 /*
7245 * Check the flags and physical revision.
7246 * Note! This will revalidate the uTlbPhysRev after a full load. This is
7247 * just to keep the code structure simple (i.e. avoid gotos or similar).
7248 */
7249 uint8_t *pbMem;
7250 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
7251 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7252# ifdef IN_RING3
7253 pbMem = pTlbe->pbMappingR3;
7254# else
7255 pbMem = NULL;
7256# endif
7257 else
7258 {
7259 Assert(!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED)));
7260
7261 /*
7262 * Okay, something isn't quite right or needs refreshing.
7263 */
7264 /* Write to read only memory? */
7265 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
7266 {
7267 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7268# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7269/** @todo TLB: EPT isn't integrated into the TLB stuff, so we don't know whether
7270 * to trigger a \#PF or a VM nested paging exit here yet! */
7271 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7272 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7273# endif
7274 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7275 }
7276
7277 /* Kernel memory accessed by userland? */
7278 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
7279 {
7280 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7281# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
7282/** @todo TLB: See above. */
7283 if (Walk.fFailed & PGM_WALKFAIL_EPT)
7284 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
7285# endif
7286 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
7287 }
7288
7289 /*
7290 * Check if the physical page info needs updating.
7291 */
7292 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
7293# ifdef IN_RING3
7294 pbMem = pTlbe->pbMappingR3;
7295# else
7296 pbMem = NULL;
7297# endif
7298 else
7299 {
7300 pTlbe->pbMappingR3 = NULL;
7301 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_GCPHYS2PTR_MASK;
7302 pbMem = NULL;
7303 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
7304 &pbMem, &pTlbe->fFlagsAndPhysRev);
7305 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
7306# ifdef IN_RING3
7307 pTlbe->pbMappingR3 = pbMem;
7308# endif
7309 }
7310
7311 /*
7312 * Check the physical page level access and mapping.
7313 */
7314 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
7315 { /* probably likely */ }
7316 else
7317 {
7318 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
7319 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
7320 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
7321 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
7322 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
7323 if (rcStrict == VINF_SUCCESS)
7324 return pbMem;
7325 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7326 }
7327 }
7328 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
7329
7330 if (pbMem)
7331 {
7332 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
7333 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7334 fAccess |= IEM_ACCESS_NOT_LOCKED;
7335 }
7336 else
7337 {
7338 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
7339 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
7340 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7341 if (rcStrict == VINF_SUCCESS)
7342 {
7343 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7344 return pbMem;
7345 }
7346 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7347 }
7348
7349 void * const pvMem = pbMem;
7350
7351 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7352 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7353 if (fAccess & IEM_ACCESS_TYPE_READ)
7354 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
7355
7356#else /* !IEM_WITH_DATA_TLB */
7357
7358
7359 RTGCPHYS GCPhysFirst;
7360 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
7361 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
7362 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7363
7364 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7365 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7366 if (fAccess & IEM_ACCESS_TYPE_READ)
7367 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
7368
7369 void *pvMem;
7370 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7371 if (rcStrict == VINF_SUCCESS)
7372 { /* likely */ }
7373 else
7374 {
7375 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
7376 if (rcStrict == VINF_SUCCESS)
7377 return pvMem;
7378 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7379 }
7380
7381#endif /* !IEM_WITH_DATA_TLB */
7382
7383 /*
7384 * Fill in the mapping table entry.
7385 */
7386 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
7387 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
7388 pVCpu->iem.s.iNextMapping = iMemMap + 1;
7389 pVCpu->iem.s.cActiveMappings++;
7390
7391 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
7392
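    /* Encode the unmap cookie handed back to the caller: bits 0..2 hold the
       mapping index, bit 3 marks the cookie as valid, and the upper nibble
       echoes the access type so the commit/unmap helpers can sanity check it. */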
7393 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
7394 return pvMem;
7395}
7396
7397
7398/** @see iemMemMapJmp */
7399static void *iemMemMapSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
7400 uint32_t fAccess, uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
7401{
7402 return iemMemMapJmp<true /*a_fSafeCall*/>(pVCpu, pbUnmapInfo, cbMem, iSegReg, GCPtrMem, fAccess, uAlignCtl);
7403}
7404
7405
7406/**
7407 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
7408 *
7409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7410 * @param bUnmapInfo Unmap info returned by iemMemMapJmp (mapping index,
7411 * validity bit and access type).
7412 */
7413void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7414{
7415 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7416 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
7417 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7418 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7419 == ((unsigned)bUnmapInfo >> 4),
7420 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
7421
7422 /* If it's bounce buffered, we may need to write back the buffer. */
7423 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7424 {
7425 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7426 {
7427 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
7428 if (rcStrict == VINF_SUCCESS)
7429 return;
7430 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
7431 }
7432 }
7433 /* Otherwise unlock it. */
7434 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7435 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7436
7437 /* Free the entry. */
7438 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7439 Assert(pVCpu->iem.s.cActiveMappings != 0);
7440 pVCpu->iem.s.cActiveMappings--;
7441}
7442
7443
7444/** Fallback for iemMemCommitAndUnmapRwJmp. */
7445void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7446{
7447 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7448 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7449}
7450
7451
7452/** Fallback for iemMemCommitAndUnmapAtJmp. */
7453void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7454{
7455 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7456 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7457}
7458
7459
7460/** Fallback for iemMemCommitAndUnmapWoJmp. */
7461void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7462{
7463 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7464 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7465}
7466
7467
7468/** Fallback for iemMemCommitAndUnmapRoJmp. */
7469void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7470{
7471 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7472 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7473}
7474
7475
7476/** Fallback for iemMemRollbackAndUnmapWo. */
7477void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7478{
7479 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7480 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7481}
7482
7483#endif /* IEM_WITH_SETJMP */
7484
7485#ifndef IN_RING3
7486/**
7487 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
7488 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
7489 *
7490 * Allows the instruction to be completed and retired, while the IEM user will
7491 * return to ring-3 immediately afterwards and do the postponed writes there.
7492 *
7493 * @returns VBox status code (no strict statuses). Caller must check
7494 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
7495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7496 * @param bUnmapInfo Unmap info returned by iemMemMap or iemMemMapJmp
7497 * (mapping index, validity bit and access type).
7498 */
7499VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7500{
7501 uintptr_t const iMemMap = bUnmapInfo & 0x7;
7502 AssertMsgReturn( (bUnmapInfo & 0x08)
7503 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
7504 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
7505 == ((unsigned)bUnmapInfo >> 4),
7506 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
7507 VERR_NOT_FOUND);
7508
7509 /* If it's bounce buffered, we may need to write back the buffer. */
7510 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
7511 {
7512 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
7513 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
7514 }
7515 /* Otherwise unlock it. */
7516 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
7517 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7518
7519 /* Free the entry. */
7520 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7521 Assert(pVCpu->iem.s.cActiveMappings != 0);
7522 pVCpu->iem.s.cActiveMappings--;
7523 return VINF_SUCCESS;
7524}
7525#endif
7526
7527
7528/**
7529 * Rolls back mappings, releasing page locks and such.
7530 *
7531 * The caller shall only call this after checking cActiveMappings.
7532 *
7533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7534 */
7535void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7536{
7537 Assert(pVCpu->iem.s.cActiveMappings > 0);
7538
7539 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7540 while (iMemMap-- > 0)
7541 {
7542 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7543 if (fAccess != IEM_ACCESS_INVALID)
7544 {
7545 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7546 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7547 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7548 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7549 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7550 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7551 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7552 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7553 pVCpu->iem.s.cActiveMappings--;
7554 }
7555 }
7556}
7557
7558
7559/*
7560 * Instantiate R/W templates.
7561 */
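/* Each inclusion of IEMAllMemRWTmpl.cpp.h below is presumably expanded for the
   TMPL_MEM_TYPE/TMPL_MEM_FN_SUFF pair defined right before it, yielding the
   fetch/store helpers of that width which are used elsewhere in this file
   (e.g. iemMemFetchDataU16, iemMemStoreDataU32 and friends). */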
7562#define TMPL_MEM_WITH_STACK
7563
7564#define TMPL_MEM_TYPE uint8_t
7565#define TMPL_MEM_FN_SUFF U8
7566#define TMPL_MEM_FMT_TYPE "%#04x"
7567#define TMPL_MEM_FMT_DESC "byte"
7568#include "IEMAllMemRWTmpl.cpp.h"
7569
7570#define TMPL_MEM_TYPE uint16_t
7571#define TMPL_MEM_FN_SUFF U16
7572#define TMPL_MEM_FMT_TYPE "%#06x"
7573#define TMPL_MEM_FMT_DESC "word"
7574#include "IEMAllMemRWTmpl.cpp.h"
7575
7576#define TMPL_WITH_PUSH_SREG
7577#define TMPL_MEM_TYPE uint32_t
7578#define TMPL_MEM_FN_SUFF U32
7579#define TMPL_MEM_FMT_TYPE "%#010x"
7580#define TMPL_MEM_FMT_DESC "dword"
7581#include "IEMAllMemRWTmpl.cpp.h"
7582#undef TMPL_WITH_PUSH_SREG
7583
7584#define TMPL_MEM_TYPE uint64_t
7585#define TMPL_MEM_FN_SUFF U64
7586#define TMPL_MEM_FMT_TYPE "%#018RX64"
7587#define TMPL_MEM_FMT_DESC "qword"
7588#include "IEMAllMemRWTmpl.cpp.h"
7589
7590#undef TMPL_MEM_WITH_STACK
7591
7592#define TMPL_MEM_TYPE uint32_t
7593#define TMPL_MEM_TYPE_ALIGN 0
7594#define TMPL_MEM_FN_SUFF U32NoAc
7595#define TMPL_MEM_FMT_TYPE "%#010x"
7596#define TMPL_MEM_FMT_DESC "dword"
7597#include "IEMAllMemRWTmpl.cpp.h"
7598#undef TMPL_WITH_PUSH_SREG
7599
7600#define TMPL_MEM_TYPE uint64_t
7601#define TMPL_MEM_TYPE_ALIGN 0
7602#define TMPL_MEM_FN_SUFF U64NoAc
7603#define TMPL_MEM_FMT_TYPE "%#018RX64"
7604#define TMPL_MEM_FMT_DESC "qword"
7605#include "IEMAllMemRWTmpl.cpp.h"
7606
7607#define TMPL_MEM_TYPE uint64_t
7608#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7609#define TMPL_MEM_FN_SUFF U64AlignedU128
7610#define TMPL_MEM_FMT_TYPE "%#018RX64"
7611#define TMPL_MEM_FMT_DESC "qword"
7612#include "IEMAllMemRWTmpl.cpp.h"
7613
7614/* See IEMAllMemRWTmplInline.cpp.h */
7615#define TMPL_MEM_BY_REF
7616
7617#define TMPL_MEM_TYPE RTFLOAT80U
7618#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
7619#define TMPL_MEM_FN_SUFF R80
7620#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7621#define TMPL_MEM_FMT_DESC "tword"
7622#include "IEMAllMemRWTmpl.cpp.h"
7623
7624#define TMPL_MEM_TYPE RTPBCD80U
7625#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
7626#define TMPL_MEM_FN_SUFF D80
7627#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
7628#define TMPL_MEM_FMT_DESC "tword"
7629#include "IEMAllMemRWTmpl.cpp.h"
7630
7631#define TMPL_MEM_TYPE RTUINT128U
7632#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7633#define TMPL_MEM_FN_SUFF U128
7634#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7635#define TMPL_MEM_FMT_DESC "dqword"
7636#include "IEMAllMemRWTmpl.cpp.h"
7637
7638#define TMPL_MEM_TYPE RTUINT128U
7639#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
7640#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
7641#define TMPL_MEM_FN_SUFF U128AlignedSse
7642#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7643#define TMPL_MEM_FMT_DESC "dqword"
7644#include "IEMAllMemRWTmpl.cpp.h"
7645
7646#define TMPL_MEM_TYPE RTUINT128U
7647#define TMPL_MEM_TYPE_ALIGN 0
7648#define TMPL_MEM_FN_SUFF U128NoAc
7649#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
7650#define TMPL_MEM_FMT_DESC "dqword"
7651#include "IEMAllMemRWTmpl.cpp.h"
7652
7653#define TMPL_MEM_TYPE RTUINT256U
7654#define TMPL_MEM_TYPE_ALIGN 0
7655#define TMPL_MEM_FN_SUFF U256NoAc
7656#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7657#define TMPL_MEM_FMT_DESC "qqword"
7658#include "IEMAllMemRWTmpl.cpp.h"
7659
7660#define TMPL_MEM_TYPE RTUINT256U
7661#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
7662#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
7663#define TMPL_MEM_FN_SUFF U256AlignedAvx
7664#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
7665#define TMPL_MEM_FMT_DESC "qqword"
7666#include "IEMAllMemRWTmpl.cpp.h"
7667
7668/**
7669 * Fetches a data dword and zero extends it to a qword.
7670 *
7671 * @returns Strict VBox status code.
7672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7673 * @param pu64Dst Where to return the qword.
7674 * @param iSegReg The index of the segment register to use for
7675 * this access. The base and limits are checked.
7676 * @param GCPtrMem The address of the guest memory.
7677 */
7678VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7679{
7680 /* The lazy approach for now... */
7681 uint8_t bUnmapInfo;
7682 uint32_t const *pu32Src;
7683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7684 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7685 if (rc == VINF_SUCCESS)
7686 {
7687 *pu64Dst = *pu32Src;
7688 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7689 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7690 }
7691 return rc;
7692}
7693
7694
7695#ifdef SOME_UNUSED_FUNCTION
7696/**
7697 * Fetches a data dword and sign extends it to a qword.
7698 *
7699 * @returns Strict VBox status code.
7700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7701 * @param pu64Dst Where to return the sign extended value.
7702 * @param iSegReg The index of the segment register to use for
7703 * this access. The base and limits are checked.
7704 * @param GCPtrMem The address of the guest memory.
7705 */
7706VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7707{
7708 /* The lazy approach for now... */
7709 uint8_t bUnmapInfo;
7710 int32_t const *pi32Src;
7711 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
7712 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7713 if (rc == VINF_SUCCESS)
7714 {
7715 *pu64Dst = *pi32Src;
7716 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7717 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7718 }
7719#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7720 else
7721 *pu64Dst = 0;
7722#endif
7723 return rc;
7724}
7725#endif
7726
7727
7728/**
7729 * Fetches a descriptor register (lgdt, lidt).
7730 *
7731 * @returns Strict VBox status code.
7732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7733 * @param pcbLimit Where to return the limit.
7734 * @param pGCPtrBase Where to return the base.
7735 * @param iSegReg The index of the segment register to use for
7736 * this access. The base and limits are checked.
7737 * @param GCPtrMem The address of the guest memory.
7738 * @param enmOpSize The effective operand size.
7739 */
7740VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7741 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7742{
7743 /*
7744 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7745 * little special:
7746 * - The two reads are done separately.
7747 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7748 * - We suspect the 386 to actually commit the limit before the base in
7749 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7750 * don't try to emulate this eccentric behavior, because it's not well
7751 * enough understood and rather hard to trigger.
7752 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7753 */
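    /* In-memory operand layout handled below: bytes 0..1 hold the 16-bit limit,
       followed at offset 2 by the base (8 bytes in 64-bit code, otherwise 4
       bytes, of which only the low 24 bits are used with a 16-bit operand size). */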
7754 VBOXSTRICTRC rcStrict;
7755 if (IEM_IS_64BIT_CODE(pVCpu))
7756 {
7757 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7758 if (rcStrict == VINF_SUCCESS)
7759 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7760 }
7761 else
7762 {
7763 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7764 if (enmOpSize == IEMMODE_32BIT)
7765 {
7766 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7767 {
7768 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7769 if (rcStrict == VINF_SUCCESS)
7770 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7771 }
7772 else
7773 {
7774 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7775 if (rcStrict == VINF_SUCCESS)
7776 {
7777 *pcbLimit = (uint16_t)uTmp;
7778 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7779 }
7780 }
7781 if (rcStrict == VINF_SUCCESS)
7782 *pGCPtrBase = uTmp;
7783 }
7784 else
7785 {
7786 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7787 if (rcStrict == VINF_SUCCESS)
7788 {
7789 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7790 if (rcStrict == VINF_SUCCESS)
7791 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7792 }
7793 }
7794 }
7795 return rcStrict;
7796}
7797
7798
7799/**
7800 * Stores a data dqword, SSE aligned.
7801 *
7802 * @returns Strict VBox status code.
7803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7804 * @param iSegReg The index of the segment register to use for
7805 * this access. The base and limits are checked.
7806 * @param GCPtrMem The address of the guest memory.
7807 * @param u128Value The value to store.
7808 */
7809VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7810{
7811 /* The lazy approach for now... */
7812 uint8_t bUnmapInfo;
7813 PRTUINT128U pu128Dst;
7814 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7815 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7816 if (rc == VINF_SUCCESS)
7817 {
7818 pu128Dst->au64[0] = u128Value.au64[0];
7819 pu128Dst->au64[1] = u128Value.au64[1];
7820 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7821 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7822 }
7823 return rc;
7824}
7825
7826
7827#ifdef IEM_WITH_SETJMP
7828/**
7829 * Stores a data dqword, SSE aligned, longjmp on error.
7830 *
7832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7833 * @param iSegReg The index of the segment register to use for
7834 * this access. The base and limits are checked.
7835 * @param GCPtrMem The address of the guest memory.
7836 * @param u128Value The value to store.
7837 */
7838void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7839 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7840{
7841 /* The lazy approach for now... */
7842 uint8_t bUnmapInfo;
7843 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7844 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7845 pu128Dst->au64[0] = u128Value.au64[0];
7846 pu128Dst->au64[1] = u128Value.au64[1];
7847 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7848 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7849}
7850#endif
7851
7852
7853/**
7854 * Stores a data qqword.
7855 *
7856 * @returns Strict VBox status code.
7857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7858 * @param iSegReg The index of the segment register to use for
7859 * this access. The base and limits are checked.
7860 * @param GCPtrMem The address of the guest memory.
7861 * @param pu256Value Pointer to the value to store.
7862 */
7863VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7864{
7865 /* The lazy approach for now... */
7866 uint8_t bUnmapInfo;
7867 PRTUINT256U pu256Dst;
7868 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7869 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7870 if (rc == VINF_SUCCESS)
7871 {
7872 pu256Dst->au64[0] = pu256Value->au64[0];
7873 pu256Dst->au64[1] = pu256Value->au64[1];
7874 pu256Dst->au64[2] = pu256Value->au64[2];
7875 pu256Dst->au64[3] = pu256Value->au64[3];
7876 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7877 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7878 }
7879 return rc;
7880}
7881
7882
7883#ifdef IEM_WITH_SETJMP
7884/**
7885 * Stores a data qqword, longjmp on error.
7886 *
7887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7888 * @param iSegReg The index of the segment register to use for
7889 * this access. The base and limits are checked.
7890 * @param GCPtrMem The address of the guest memory.
7891 * @param pu256Value Pointer to the value to store.
7892 */
7893void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7894{
7895 /* The lazy approach for now... */
7896 uint8_t bUnmapInfo;
7897 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7898 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7899 pu256Dst->au64[0] = pu256Value->au64[0];
7900 pu256Dst->au64[1] = pu256Value->au64[1];
7901 pu256Dst->au64[2] = pu256Value->au64[2];
7902 pu256Dst->au64[3] = pu256Value->au64[3];
7903 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7904 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7905}
7906#endif
7907
7908
7909/**
7910 * Stores a descriptor register (sgdt, sidt).
7911 *
7912 * @returns Strict VBox status code.
7913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7914 * @param cbLimit The limit.
7915 * @param GCPtrBase The base address.
7916 * @param iSegReg The index of the segment register to use for
7917 * this access. The base and limits are checked.
7918 * @param GCPtrMem The address of the guest memory.
7919 */
7920VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7921{
7922 /*
7923 * The SIDT and SGDT instructions actually store the data using two
7924 * independent writes (see bs3CpuBasic2_sidt_sgdt_One). The instructions
7925 * do not respond to opsize prefixes.
7926 */
7927 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7928 if (rcStrict == VINF_SUCCESS)
7929 {
7930 if (IEM_IS_16BIT_CODE(pVCpu))
7931 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7932 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7933 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7934 else if (IEM_IS_32BIT_CODE(pVCpu))
7935 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7936 else
7937 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7938 }
7939 return rcStrict;
7940}
7941
7942
7943/**
7944 * Begin a special stack push (used by interrupts, exceptions and such).
7945 *
7946 * This will raise \#SS or \#PF if appropriate.
7947 *
7948 * @returns Strict VBox status code.
7949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7950 * @param cbMem The number of bytes to push onto the stack.
7951 * @param cbAlign The alignment mask (7, 3, 1).
7952 * @param ppvMem Where to return the pointer to the stack memory.
7953 * As with the other memory functions this could be
7954 * direct access or bounce buffered access, so
7955 * don't commit any register changes until the commit call
7956 * succeeds.
7957 * @param pbUnmapInfo Where to store unmap info for
7958 * iemMemStackPushCommitSpecial.
7959 * @param puNewRsp Where to return the new RSP value. This must be
7960 * passed unchanged to
7961 * iemMemStackPushCommitSpecial().
7962 */
7963VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7964 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7965{
7966 Assert(cbMem < UINT8_MAX);
7967 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7968 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7969}
7970
7971
7972/**
7973 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7974 *
7975 * This will update the rSP.
7976 *
7977 * @returns Strict VBox status code.
7978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7979 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7980 * @param uNewRsp The new RSP value returned by
7981 * iemMemStackPushBeginSpecial().
7982 */
7983VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7984{
7985 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7986 if (rcStrict == VINF_SUCCESS)
7987 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7988 return rcStrict;
7989}
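/*
 * Minimal usage sketch of the special push pair above (illustrative only, not
 * taken from an actual caller; the local names and the uErrCd value are made
 * up): map the stack bytes, write them, then commit which also updates RSP.
 */
#if 0 /* example only */
    void          *pvStackMem = NULL;
    uint8_t        bUnmapInfo = 0;
    uint64_t       uNewRsp    = 0;
    uint32_t const uErrCd     = 0; /* e.g. an exception error code */
    VBOXSTRICTRC   rcStrict   = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint32_t), 3 /*cbAlign*/,
                                                            &pvStackMem, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *(uint32_t *)pvStackMem = uErrCd;  /* write via the mapping (may be bounce buffered) */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp); /* commits + sets RSP */
    }
#endif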
7990
7991
7992/**
7993 * Begin a special stack pop (used by iret, retf and such).
7994 *
7995 * This will raise \#SS or \#PF if appropriate.
7996 *
7997 * @returns Strict VBox status code.
7998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7999 * @param cbMem The number of bytes to pop from the stack.
8000 * @param cbAlign The alignment mask (7, 3, 1).
8001 * @param ppvMem Where to return the pointer to the stack memory.
8002 * @param pbUnmapInfo Where to store unmap info for
8003 * iemMemStackPopDoneSpecial.
8004 * @param puNewRsp Where to return the new RSP value. This must be
8005 * assigned to CPUMCTX::rsp manually some time
8006 * after iemMemStackPopDoneSpecial() has been
8007 * called.
8008 */
8009VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8010 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
8011{
8012 Assert(cbMem < UINT8_MAX);
8013 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8014 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8015}
8016
8017
8018/**
8019 * Continue a special stack pop (used by iret and retf), for the purpose of
8020 * retrieving a new stack pointer.
8021 *
8022 * This will raise \#SS or \#PF if appropriate.
8023 *
8024 * @returns Strict VBox status code.
8025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8026 * @param off Offset from the top of the stack. This is zero
8027 * except in the retf case.
8028 * @param cbMem The number of bytes to pop from the stack.
8029 * @param ppvMem Where to return the pointer to the stack memory.
8030 * @param pbUnmapInfo Where to store unmap info for
8031 * iemMemStackPopDoneSpecial.
8032 * @param uCurNewRsp The current uncommitted RSP value. (No need to
8033 * return this because all use of this function is
8034 * to retrieve a new value and anything we return
8035 * here would be discarded.)
8036 */
8037VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
8038 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
8039{
8040 Assert(cbMem < UINT8_MAX);
8041
8042 /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
8043 RTGCPTR GCPtrTop;
8044 if (IEM_IS_64BIT_CODE(pVCpu))
8045 GCPtrTop = uCurNewRsp;
8046 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
8047 GCPtrTop = (uint32_t)uCurNewRsp;
8048 else
8049 GCPtrTop = (uint16_t)uCurNewRsp;
8050
8051 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
8052 0 /* checked in iemMemStackPopBeginSpecial */);
8053}
8054
8055
8056/**
8057 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8058 * iemMemStackPopContinueSpecial).
8059 *
8060 * The caller will manually commit the rSP.
8061 *
8062 * @returns Strict VBox status code.
8063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8064 * @param bUnmapInfo Unmap information returned by
8065 * iemMemStackPopBeginSpecial() or
8066 * iemMemStackPopContinueSpecial().
8067 */
8068VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
8069{
8070 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8071}
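/*
 * Minimal usage sketch of the special pop protocol above (illustrative only,
 * not taken from iret/retf; the local names are made up).  Note that RSP is
 * only committed manually after iemMemStackPopDoneSpecial(), as the parameter
 * documentation requires.
 */
#if 0 /* example only */
    void const  *pvStackMem = NULL;
    uint8_t      bUnmapInfo = 0;
    uint64_t     uNewRsp    = 0;
    VBOXSTRICTRC rcStrict   = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint16_t), 1 /*cbAlign*/,
                                                         &pvStackMem, &bUnmapInfo, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        uint16_t const uPoppedValue = *(uint16_t const *)pvStackMem; /* consume as needed */
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp; /* manual RSP commit */
        RT_NOREF(uPoppedValue);
    }
#endif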
8072
8073
8074/**
8075 * Fetches a system table byte.
8076 *
8077 * @returns Strict VBox status code.
8078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8079 * @param pbDst Where to return the byte.
8080 * @param iSegReg The index of the segment register to use for
8081 * this access. The base and limits are checked.
8082 * @param GCPtrMem The address of the guest memory.
8083 */
8084VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8085{
8086 /* The lazy approach for now... */
8087 uint8_t bUnmapInfo;
8088 uint8_t const *pbSrc;
8089 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8090 if (rc == VINF_SUCCESS)
8091 {
8092 *pbDst = *pbSrc;
8093 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8094 }
8095 return rc;
8096}
8097
8098
8099/**
8100 * Fetches a system table word.
8101 *
8102 * @returns Strict VBox status code.
8103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8104 * @param pu16Dst Where to return the word.
8105 * @param iSegReg The index of the segment register to use for
8106 * this access. The base and limits are checked.
8107 * @param GCPtrMem The address of the guest memory.
8108 */
8109VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8110{
8111 /* The lazy approach for now... */
8112 uint8_t bUnmapInfo;
8113 uint16_t const *pu16Src;
8114 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8115 if (rc == VINF_SUCCESS)
8116 {
8117 *pu16Dst = *pu16Src;
8118 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8119 }
8120 return rc;
8121}
8122
8123
8124/**
8125 * Fetches a system table dword.
8126 *
8127 * @returns Strict VBox status code.
8128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8129 * @param pu32Dst Where to return the dword.
8130 * @param iSegReg The index of the segment register to use for
8131 * this access. The base and limits are checked.
8132 * @param GCPtrMem The address of the guest memory.
8133 */
8134VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8135{
8136 /* The lazy approach for now... */
8137 uint8_t bUnmapInfo;
8138 uint32_t const *pu32Src;
8139 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8140 if (rc == VINF_SUCCESS)
8141 {
8142 *pu32Dst = *pu32Src;
8143 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8144 }
8145 return rc;
8146}
8147
8148
8149/**
8150 * Fetches a system table qword.
8151 *
8152 * @returns Strict VBox status code.
8153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8154 * @param pu64Dst Where to return the qword.
8155 * @param iSegReg The index of the segment register to use for
8156 * this access. The base and limits are checked.
8157 * @param GCPtrMem The address of the guest memory.
8158 */
8159VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8160{
8161 /* The lazy approach for now... */
8162 uint8_t bUnmapInfo;
8163 uint64_t const *pu64Src;
8164 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8165 if (rc == VINF_SUCCESS)
8166 {
8167 *pu64Dst = *pu64Src;
8168 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8169 }
8170 return rc;
8171}
8172
8173
8174/**
8175 * Fetches a descriptor table entry with caller specified error code.
8176 *
8177 * @returns Strict VBox status code.
8178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8179 * @param pDesc Where to return the descriptor table entry.
8180 * @param uSel The selector which table entry to fetch.
8181 * @param uXcpt The exception to raise on table lookup error.
8182 * @param uErrorCode The error code associated with the exception.
8183 */
8184static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8185 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8186{
8187 AssertPtr(pDesc);
8188 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8189
8190 /** @todo did the 286 require all 8 bytes to be accessible? */
8191 /*
8192 * Get the selector table base and check bounds.
8193 */
8194 RTGCPTR GCPtrBase;
8195 if (uSel & X86_SEL_LDT)
8196 {
8197 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8198 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8199 {
8200 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8201 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8202 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8203 uErrorCode, 0);
8204 }
8205
8206 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8207 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8208 }
8209 else
8210 {
8211 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8212 {
8213 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8214 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8215 uErrorCode, 0);
8216 }
8217 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8218 }
8219
8220 /*
8221 * Read the legacy descriptor and maybe the long mode extensions if
8222 * required.
8223 */
8224 VBOXSTRICTRC rcStrict;
8225 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8226 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8227 else
8228 {
8229 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8230 if (rcStrict == VINF_SUCCESS)
8231 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8232 if (rcStrict == VINF_SUCCESS)
8233 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8234 if (rcStrict == VINF_SUCCESS)
8235 pDesc->Legacy.au16[3] = 0;
8236 else
8237 return rcStrict;
8238 }
8239
8240 if (rcStrict == VINF_SUCCESS)
8241 {
8242 if ( !IEM_IS_LONG_MODE(pVCpu)
8243 || pDesc->Legacy.Gen.u1DescType)
8244 pDesc->Long.au64[1] = 0;
8245 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
8246 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8247 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8248 else
8249 {
8250 LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8251 /** @todo is this the right exception? */
8252 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8253 }
8254 }
8255 return rcStrict;
8256}
8257
8258
8259/**
8260 * Fetches a descriptor table entry.
8261 *
8262 * @returns Strict VBox status code.
8263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8264 * @param pDesc Where to return the descriptor table entry.
8265 * @param uSel The selector which table entry to fetch.
8266 * @param uXcpt The exception to raise on table lookup error.
8267 */
8268VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8269{
8270 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8271}
8272
8273
8274/**
8275 * Marks the selector descriptor as accessed (only non-system descriptors).
8276 *
8277 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8278 * will therefore skip the limit checks.
8279 *
8280 * @returns Strict VBox status code.
8281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8282 * @param uSel The selector.
8283 */
8284VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8285{
8286 /*
8287 * Get the selector table base and calculate the entry address.
8288 */
8289 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8290 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8291 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8292 GCPtr += uSel & X86_SEL_MASK;
8293
8294 /*
8295 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8296 * ugly stuff to avoid this. This will make sure it's an atomic access
8297 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8298 */
8299 VBOXSTRICTRC rcStrict;
8300 uint8_t bUnmapInfo;
8301 uint32_t volatile *pu32;
8302 if ((GCPtr & 3) == 0)
8303 {
8304 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8305 GCPtr += 2 + 2;
8306 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8307 if (rcStrict != VINF_SUCCESS)
8308 return rcStrict;
8309 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8310 }
8311 else
8312 {
8313 /* The misaligned GDT/LDT case, map the whole thing. */
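        /* Each case below ends up setting the same absolute bit 40 of the 8-byte
           descriptor (the accessed bit); the byte offset added to pu32 and the
           bit number handed to ASMAtomicBitSet are shifted against each other
           so the operand address stays 32-bit aligned. */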
8314 rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8315 if (rcStrict != VINF_SUCCESS)
8316 return rcStrict;
8317 switch ((uintptr_t)pu32 & 3)
8318 {
8319 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8320 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8321 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8322 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8323 }
8324 }
8325
8326 return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8327}
8328
8329
8330#undef LOG_GROUP
8331#define LOG_GROUP LOG_GROUP_IEM
8332
8333/** @} */
8334
8335/** @name Opcode Helpers.
8336 * @{
8337 */
8338
8339/**
8340 * Calculates the effective address of a ModR/M memory operand.
8341 *
8342 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8343 *
8344 * @return Strict VBox status code.
8345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8346 * @param bRm The ModRM byte.
8347 * @param cbImmAndRspOffset - First byte: The size of any immediate
8348 * following the effective address opcode bytes
8349 * (only for RIP relative addressing).
8350 * - Second byte: RSP displacement (for POP [ESP]).
8351 * @param pGCPtrEff Where to return the effective address.
8352 */
8353VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8354{
8355 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8356# define SET_SS_DEF() \
8357 do \
8358 { \
8359 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8360 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8361 } while (0)
8362
8363 if (!IEM_IS_64BIT_CODE(pVCpu))
8364 {
8365/** @todo Check the effective address size crap! */
8366 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8367 {
8368 uint16_t u16EffAddr;
8369
8370 /* Handle the disp16 form with no registers first. */
8371 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8372 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8373 else
8374 {
8375 /* Get the displacement. */
8376 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8377 {
8378 case 0: u16EffAddr = 0; break;
8379 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8380 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8381 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8382 }
8383
8384 /* Add the base and index registers to the disp. */
8385 switch (bRm & X86_MODRM_RM_MASK)
8386 {
8387 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8388 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8389 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8390 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8391 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8392 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8393 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8394 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8395 }
8396 }
8397
8398 *pGCPtrEff = u16EffAddr;
8399 }
8400 else
8401 {
8402 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8403 uint32_t u32EffAddr;
8404
8405 /* Handle the disp32 form with no registers first. */
8406 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8407 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8408 else
8409 {
8410 /* Get the register (or SIB) value. */
8411 switch ((bRm & X86_MODRM_RM_MASK))
8412 {
8413 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8414 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8415 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8416 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8417 case 4: /* SIB */
8418 {
8419 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8420
8421 /* Get the index and scale it. */
8422 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8423 {
8424 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8425 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8426 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8427 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8428 case 4: u32EffAddr = 0; /*none */ break;
8429 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8430 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8431 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8432 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8433 }
8434 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8435
8436 /* add base */
8437 switch (bSib & X86_SIB_BASE_MASK)
8438 {
8439 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8440 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8441 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8442 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8443 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8444 case 5:
8445 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8446 {
8447 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8448 SET_SS_DEF();
8449 }
8450 else
8451 {
8452 uint32_t u32Disp;
8453 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8454 u32EffAddr += u32Disp;
8455 }
8456 break;
8457 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8458 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8459 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8460 }
8461 break;
8462 }
8463 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8464 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8465 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8466 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8467 }
8468
8469 /* Get and add the displacement. */
8470 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8471 {
8472 case 0:
8473 break;
8474 case 1:
8475 {
8476 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8477 u32EffAddr += i8Disp;
8478 break;
8479 }
8480 case 2:
8481 {
8482 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8483 u32EffAddr += u32Disp;
8484 break;
8485 }
8486 default:
8487 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8488 }
8489
8490 }
8491 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8492 *pGCPtrEff = u32EffAddr;
8493 }
8494 }
8495 else
8496 {
8497 uint64_t u64EffAddr;
8498
8499 /* Handle the rip+disp32 form with no registers first. */
8500 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8501 {
8502 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8503 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8504 }
8505 else
8506 {
8507 /* Get the register (or SIB) value. */
8508 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8509 {
8510 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8511 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8512 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8513 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8514 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8515 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8516 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8517 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8518 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8519 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8520 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8521 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8522 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8523 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8524 /* SIB */
8525 case 4:
8526 case 12:
8527 {
8528 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8529
8530 /* Get the index and scale it. */
8531 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8532 {
8533 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8534 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8535 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8536 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8537 case 4: u64EffAddr = 0; /*none */ break;
8538 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8539 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8540 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8541 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8542 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8543 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8544 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8545 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8546 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8547 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8548 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8549 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8550 }
8551 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8552
8553 /* add base */
8554 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8555 {
8556 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8557 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8558 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8559 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8560 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8561 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8562 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8563 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8564 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8565 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8566 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8567 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8568 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8569 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8570 /* complicated encodings */
8571 case 5:
8572 case 13:
8573 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8574 {
8575 if (!pVCpu->iem.s.uRexB)
8576 {
8577 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8578 SET_SS_DEF();
8579 }
8580 else
8581 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8582 }
8583 else
8584 {
8585 uint32_t u32Disp;
8586 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8587 u64EffAddr += (int32_t)u32Disp;
8588 }
8589 break;
8590 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8591 }
8592 break;
8593 }
8594 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8595 }
8596
8597 /* Get and add the displacement. */
8598 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8599 {
8600 case 0:
8601 break;
8602 case 1:
8603 {
8604 int8_t i8Disp;
8605 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8606 u64EffAddr += i8Disp;
8607 break;
8608 }
8609 case 2:
8610 {
8611 uint32_t u32Disp;
8612 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8613 u64EffAddr += (int32_t)u32Disp;
8614 break;
8615 }
8616 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8617 }
8618
8619 }
8620
8621 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8622 *pGCPtrEff = u64EffAddr;
8623 else
8624 {
8625 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8626 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8627 }
8628 }
8629
8630 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8631 return VINF_SUCCESS;
8632}
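/*
 * Worked example (illustrative only, not from the sources): in 32-bit code,
 * bRm=0x44 (mod=1, rm=4, so a SIB byte follows) with bSib=0x91 (scale=2,
 * index=EDX, base=ECX) and disp8=0x10 decodes above to
 *      GCPtrEff = ECX + EDX*4 + 0x10
 * with no SS segment default, since neither EBP nor ESP is involved.
 */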
8633
8634
8635#ifdef IEM_WITH_SETJMP
8636/**
8637 * Calculates the effective address of a ModR/M memory operand.
8638 *
8639 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8640 *
8641 * May longjmp on internal error.
8642 *
8643 * @return The effective address.
8644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8645 * @param bRm The ModRM byte.
8646 * @param cbImmAndRspOffset - First byte: The size of any immediate
8647 * following the effective address opcode bytes
8648 * (only for RIP relative addressing).
8649 * - Second byte: RSP displacement (for POP [ESP]).
8650 */
8651RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8652{
8653 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8654# define SET_SS_DEF() \
8655 do \
8656 { \
8657 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8658 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8659 } while (0)
8660
8661 if (!IEM_IS_64BIT_CODE(pVCpu))
8662 {
8663/** @todo Check the effective address size crap! */
8664 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8665 {
8666 uint16_t u16EffAddr;
8667
8668 /* Handle the disp16 form with no registers first. */
8669 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8670 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8671 else
8672 {
8673 /* Get the displacement. */
8674 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8675 {
8676 case 0: u16EffAddr = 0; break;
8677 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8678 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8679 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8680 }
8681
8682 /* Add the base and index registers to the disp. */
8683 switch (bRm & X86_MODRM_RM_MASK)
8684 {
8685 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8686 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8687 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8688 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8689 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8690 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8691 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8692 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8693 }
8694 }
8695
8696 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8697 return u16EffAddr;
8698 }
8699
8700 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8701 uint32_t u32EffAddr;
8702
8703 /* Handle the disp32 form with no registers first. */
8704 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8705 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8706 else
8707 {
8708 /* Get the register (or SIB) value. */
8709 switch ((bRm & X86_MODRM_RM_MASK))
8710 {
8711 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8712 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8713 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8714 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8715 case 4: /* SIB */
8716 {
8717 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8718
8719 /* Get the index and scale it. */
8720 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8721 {
8722 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8723 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8724 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8725 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8726 case 4: u32EffAddr = 0; /*none */ break;
8727 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8728 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8729 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8730 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8731 }
8732 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8733
8734 /* add base */
8735 switch (bSib & X86_SIB_BASE_MASK)
8736 {
8737 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8738 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8739 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8740 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8741 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8742 case 5:
8743 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8744 {
8745 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8746 SET_SS_DEF();
8747 }
8748 else
8749 {
8750 uint32_t u32Disp;
8751 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8752 u32EffAddr += u32Disp;
8753 }
8754 break;
8755 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8756 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8757 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8758 }
8759 break;
8760 }
8761 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8762 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8763 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8764 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8765 }
8766
8767 /* Get and add the displacement. */
8768 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8769 {
8770 case 0:
8771 break;
8772 case 1:
8773 {
8774 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8775 u32EffAddr += i8Disp;
8776 break;
8777 }
8778 case 2:
8779 {
8780 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8781 u32EffAddr += u32Disp;
8782 break;
8783 }
8784 default:
8785 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8786 }
8787 }
8788
8789 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8790 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8791 return u32EffAddr;
8792 }
8793
8794 uint64_t u64EffAddr;
8795
8796 /* Handle the rip+disp32 form with no registers first. */
8797 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8798 {
8799 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8800 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8801 }
8802 else
8803 {
8804 /* Get the register (or SIB) value. */
8805 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8806 {
8807 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8808 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8809 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8810 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8811 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8812 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8813 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8814 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8815 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8816 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8817 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8818 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8819 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8820 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8821 /* SIB */
8822 case 4:
8823 case 12:
8824 {
8825 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8826
8827 /* Get the index and scale it. */
8828 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8829 {
8830 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8831 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8832 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8833 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8834 case 4: u64EffAddr = 0; /*none */ break;
8835 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8836 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8837 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8838 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8839 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8840 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8841 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8842 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8843 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8844 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8845 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8846 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8847 }
8848 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8849
8850 /* add base */
8851 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8852 {
8853 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8854 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8855 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8856 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8857 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8858 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8859 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8860 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8861 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8862 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8863 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8864 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8865 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8866 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8867 /* complicated encodings */
8868 case 5:
8869 case 13:
8870 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8871 {
8872 if (!pVCpu->iem.s.uRexB)
8873 {
8874 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8875 SET_SS_DEF();
8876 }
8877 else
8878 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8879 }
8880 else
8881 {
8882 uint32_t u32Disp;
8883 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8884 u64EffAddr += (int32_t)u32Disp;
8885 }
8886 break;
8887 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8888 }
8889 break;
8890 }
8891 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8892 }
8893
8894 /* Get and add the displacement. */
8895 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8896 {
8897 case 0:
8898 break;
8899 case 1:
8900 {
8901 int8_t i8Disp;
8902 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8903 u64EffAddr += i8Disp;
8904 break;
8905 }
8906 case 2:
8907 {
8908 uint32_t u32Disp;
8909 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8910 u64EffAddr += (int32_t)u32Disp;
8911 break;
8912 }
8913 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8914 }
8915
8916 }
8917
8918 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8919 {
8920 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8921 return u64EffAddr;
8922 }
8923 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8924 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8925 return u64EffAddr & UINT32_MAX;
8926}
8927#endif /* IEM_WITH_SETJMP */
8928
8929
8930/**
8931 * Calculates the effective address of a ModR/M memory operand, extended version
8932 * for use in the recompilers.
8933 *
8934 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8935 *
8936 * @return Strict VBox status code.
8937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8938 * @param bRm The ModRM byte.
8939 * @param cbImmAndRspOffset - First byte: The size of any immediate
8940 * following the effective address opcode bytes
8941 * (only for RIP relative addressing).
8942 * - Second byte: RSP displacement (for POP [ESP]).
8943 * @param pGCPtrEff Where to return the effective address.
8944 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8945 * SIB byte (bits 39:32).
8946 */
8947VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8948{
8949 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8950# define SET_SS_DEF() \
8951 do \
8952 { \
8953 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8954 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8955 } while (0)
8956
8957 uint64_t uInfo;
8958 if (!IEM_IS_64BIT_CODE(pVCpu))
8959 {
8960/** @todo Check the effective address size crap! */
8961 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8962 {
8963 uint16_t u16EffAddr;
8964
8965 /* Handle the disp16 form with no registers first. */
8966 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8967 {
8968 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8969 uInfo = u16EffAddr;
8970 }
8971 else
8972 {
8973 /* Get the displacement. */
8974 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8975 {
8976 case 0: u16EffAddr = 0; break;
8977 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8978 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8979 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8980 }
8981 uInfo = u16EffAddr;
8982
8983 /* Add the base and index registers to the disp. */
8984 switch (bRm & X86_MODRM_RM_MASK)
8985 {
8986 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8987 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8988 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8989 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8990 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8991 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8992 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8993 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8994 }
8995 }
8996
8997 *pGCPtrEff = u16EffAddr;
8998 }
8999 else
9000 {
9001 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9002 uint32_t u32EffAddr;
9003
9004 /* Handle the disp32 form with no registers first. */
9005 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9006 {
9007 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9008 uInfo = u32EffAddr;
9009 }
9010 else
9011 {
9012 /* Get the register (or SIB) value. */
9013 uInfo = 0;
9014 switch ((bRm & X86_MODRM_RM_MASK))
9015 {
9016 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9017 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9018 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9019 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9020 case 4: /* SIB */
9021 {
9022 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9023 uInfo = (uint64_t)bSib << 32;
9024
9025 /* Get the index and scale it. */
9026 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9027 {
9028 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9029 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9030 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9031 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9032 case 4: u32EffAddr = 0; /*none */ break;
9033 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9034 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9035 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9036 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9037 }
9038 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9039
9040 /* add base */
9041 switch (bSib & X86_SIB_BASE_MASK)
9042 {
9043 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9044 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9045 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9046 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9047 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9048 case 5:
9049 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9050 {
9051 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9052 SET_SS_DEF();
9053 }
9054 else
9055 {
9056 uint32_t u32Disp;
9057 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9058 u32EffAddr += u32Disp;
9059 uInfo |= u32Disp;
9060 }
9061 break;
9062 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9063 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9064 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9065 }
9066 break;
9067 }
9068 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9069 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9070 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9071 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9072 }
9073
9074 /* Get and add the displacement. */
9075 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9076 {
9077 case 0:
9078 break;
9079 case 1:
9080 {
9081 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9082 u32EffAddr += i8Disp;
9083 uInfo |= (uint32_t)(int32_t)i8Disp;
9084 break;
9085 }
9086 case 2:
9087 {
9088 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9089 u32EffAddr += u32Disp;
9090 uInfo |= (uint32_t)u32Disp;
9091 break;
9092 }
9093 default:
9094 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9095 }
9096
9097 }
9098 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9099 *pGCPtrEff = u32EffAddr;
9100 }
9101 }
9102 else
9103 {
9104 uint64_t u64EffAddr;
9105
9106 /* Handle the rip+disp32 form with no registers first. */
9107 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9108 {
9109 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9110 uInfo = (uint32_t)u64EffAddr;
9111 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
9112 }
9113 else
9114 {
9115 /* Get the register (or SIB) value. */
9116 uInfo = 0;
9117 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9118 {
9119 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9120 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9121 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9122 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9123 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9124 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9125 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9126 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9127 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9128 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9129 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9130 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9131 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9132 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9133 /* SIB */
9134 case 4:
9135 case 12:
9136 {
9137 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9138 uInfo = (uint64_t)bSib << 32;
9139
9140 /* Get the index and scale it. */
9141 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9142 {
9143 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9144 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9145 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9146 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9147 case 4: u64EffAddr = 0; /*none */ break;
9148 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9149 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9150 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9151 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9152 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9153 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9154 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9155 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9156 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9157 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9158 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9159 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9160 }
9161 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9162
9163 /* add base */
9164 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9165 {
9166 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9167 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9168 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9169 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9170 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
9171 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9172 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9173 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9174 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9175 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9176 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9177 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9178 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9179 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9180 /* complicated encodings */
9181 case 5:
9182 case 13:
9183 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9184 {
9185 if (!pVCpu->iem.s.uRexB)
9186 {
9187 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9188 SET_SS_DEF();
9189 }
9190 else
9191 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9192 }
9193 else
9194 {
9195 uint32_t u32Disp;
9196 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9197 u64EffAddr += (int32_t)u32Disp;
9198 uInfo |= u32Disp;
9199 }
9200 break;
9201 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9202 }
9203 break;
9204 }
9205 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9206 }
9207
9208 /* Get and add the displacement. */
9209 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9210 {
9211 case 0:
9212 break;
9213 case 1:
9214 {
9215 int8_t i8Disp;
9216 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9217 u64EffAddr += i8Disp;
9218 uInfo |= (uint32_t)(int32_t)i8Disp;
9219 break;
9220 }
9221 case 2:
9222 {
9223 uint32_t u32Disp;
9224 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9225 u64EffAddr += (int32_t)u32Disp;
9226 uInfo |= u32Disp;
9227 break;
9228 }
9229 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9230 }
9231
9232 }
9233
9234 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9235 *pGCPtrEff = u64EffAddr;
9236 else
9237 {
9238 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9239 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9240 }
9241 }
9242 *puInfo = uInfo;
9243
9244 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9245 return VINF_SUCCESS;
9246}
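/*
 * A minimal usage sketch for iemOpHlpCalcRmEffAddrEx.  The caller shape and the
 * constants below are purely illustrative and not taken from the recompiler:
 *
 *      RTGCPTR  GCPtrEff;
 *      uint64_t uInfo;
 *      // Low byte: 4 immediate bytes follow the ModR/M encoding (only matters for
 *      // RIP-relative addressing); second byte: extra RSP displacement, here none.
 *      uint32_t const cbImmAndRspOffset = 4 | (0 << 8);
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImmAndRspOffset, &GCPtrEff, &uInfo);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const u32Disp = (uint32_t)uInfo;          // bits 31:0:  displacement
 *          uint8_t  const bSib    = (uint8_t)(uInfo >> 32);   // bits 39:32: SIB byte (if present)
 *          // ... use GCPtrEff / u32Disp / bSib ...
 *      }
 */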
9247
9248/** @} */
9249
9250
9251#ifdef LOG_ENABLED
9252/**
9253 * Logs the current instruction.
9254 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9255 * @param fSameCtx Set if we have the same context information as the VMM,
9256 * clear if we may have already executed an instruction in
9257 * our debug context. When clear, we assume IEMCPU holds
9258 * valid CPU mode info.
9259 *
9260 * The @a fSameCtx parameter is now misleading and obsolete.
9261 * @param pszFunction The IEM function doing the execution.
9262 */
9263static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9264{
9265# ifdef IN_RING3
9266 if (LogIs2Enabled())
9267 {
9268 char szInstr[256];
9269 uint32_t cbInstr = 0;
9270 if (fSameCtx)
9271 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9272 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9273 szInstr, sizeof(szInstr), &cbInstr);
9274 else
9275 {
9276 uint32_t fFlags = 0;
9277 switch (IEM_GET_CPU_MODE(pVCpu))
9278 {
9279 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9280 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9281 case IEMMODE_16BIT:
9282 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9283 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9284 else
9285 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9286 break;
9287 }
9288 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9289 szInstr, sizeof(szInstr), &cbInstr);
9290 }
9291
9292 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9293 Log2(("**** %s fExec=%x\n"
9294 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9295 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9296 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9297 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9298 " %s\n"
9299 , pszFunction, pVCpu->iem.s.fExec,
9300 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9301 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9302 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9303 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9304 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9305 szInstr));
9306
9307 /* This stuff sucks atm. as it fills the log with MSRs. */
9308 //if (LogIs3Enabled())
9309 // DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9310 }
9311 else
9312# endif
9313 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9314 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9315 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9316}
9317#endif /* LOG_ENABLED */
9318
9319
9320#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9321/**
9322 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9323 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9324 *
9325 * @returns Modified rcStrict.
9326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9327 * @param rcStrict The instruction execution status.
9328 */
9329static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9330{
9331 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9332 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9333 {
9334 /* VMX preemption timer takes priority over NMI-window exits. */
9335 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9336 {
9337 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9338 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9339 }
9340 /*
9341 * Check remaining intercepts.
9342 *
9343 * NMI-window and Interrupt-window VM-exits.
9344 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9345 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9346 *
9347 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9348 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9349 */
9350 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9351 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9352 && !TRPMHasTrap(pVCpu))
9353 {
9354 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9355 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9356 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9357 {
9358 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9359 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9360 }
9361 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9362 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9363 {
9364 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9365 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9366 }
9367 }
9368 }
9369 /* TPR-below threshold/APIC write has the highest priority. */
9370 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9371 {
9372 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9373 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9374 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9375 }
9376 /* MTF takes priority over VMX-preemption timer. */
9377 else
9378 {
9379 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9380 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9381 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9382 }
9383 return rcStrict;
9384}
9385#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9386
9387
9388/**
9389 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9390 * IEMExecOneWithPrefetchedByPC.
9391 *
9392 * Similar code is found in IEMExecLots.
9393 *
9394 * @return Strict VBox status code.
9395 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9396 * @param fExecuteInhibit If set, execute the instruction following CLI,
9397 * POP SS and MOV SS,GR.
9398 * @param pszFunction The calling function name.
9399 */
9400DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9401{
9402 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9403 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9404 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9405 RT_NOREF_PV(pszFunction);
9406
9407#ifdef IEM_WITH_SETJMP
9408 VBOXSTRICTRC rcStrict;
9409 IEM_TRY_SETJMP(pVCpu, rcStrict)
9410 {
9411 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9412 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9413 }
9414 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9415 {
9416 pVCpu->iem.s.cLongJumps++;
9417 }
9418 IEM_CATCH_LONGJMP_END(pVCpu);
9419#else
9420 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9421 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9422#endif
9423 if (rcStrict == VINF_SUCCESS)
9424 pVCpu->iem.s.cInstructions++;
9425 if (pVCpu->iem.s.cActiveMappings > 0)
9426 {
9427 Assert(rcStrict != VINF_SUCCESS);
9428 iemMemRollback(pVCpu);
9429 }
9430 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9431 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9432 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9433
9434//#ifdef DEBUG
9435// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9436//#endif
9437
9438#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9439 /*
9440 * Perform any VMX nested-guest instruction boundary actions.
9441 *
9442 * If any of these causes a VM-exit, we must skip executing the next
9443 * instruction (would run into stale page tables). A VM-exit makes sure
9444 * there is no interrupt-inhibition, so that should ensure we don't go
9445 * to try execute the next instruction. Clearing fExecuteInhibit is
9446 * problematic because of the setjmp/longjmp clobbering above.
9447 */
9448 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9449 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9450 || rcStrict != VINF_SUCCESS)
9451 { /* likely */ }
9452 else
9453 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9454#endif
9455
9456 /* Execute the next instruction as well if a cli, pop ss or
9457 mov ss, Gr has just completed successfully. */
9458 if ( fExecuteInhibit
9459 && rcStrict == VINF_SUCCESS
9460 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9461 {
9462 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9463 if (rcStrict == VINF_SUCCESS)
9464 {
9465#ifdef LOG_ENABLED
9466 iemLogCurInstr(pVCpu, false, pszFunction);
9467#endif
9468#ifdef IEM_WITH_SETJMP
9469 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9470 {
9471 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9472 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9473 }
9474 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9475 {
9476 pVCpu->iem.s.cLongJumps++;
9477 }
9478 IEM_CATCH_LONGJMP_END(pVCpu);
9479#else
9480 IEM_OPCODE_GET_FIRST_U8(&b);
9481 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9482#endif
9483 if (rcStrict == VINF_SUCCESS)
9484 {
9485 pVCpu->iem.s.cInstructions++;
9486#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9487 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9488 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9489 { /* likely */ }
9490 else
9491 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9492#endif
9493 }
9494 if (pVCpu->iem.s.cActiveMappings > 0)
9495 {
9496 Assert(rcStrict != VINF_SUCCESS);
9497 iemMemRollback(pVCpu);
9498 }
9499 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9500 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9501 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9502 }
9503 else if (pVCpu->iem.s.cActiveMappings > 0)
9504 iemMemRollback(pVCpu);
9505 /** @todo drop this after we bake this change into RIP advancing. */
9506 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9507 }
9508
9509 /*
9510 * Return value fiddling, statistics and sanity assertions.
9511 */
9512 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9513
9514 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9515 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9516 return rcStrict;
9517}
9518
9519
9520/**
9521 * Execute one instruction.
9522 *
9523 * @return Strict VBox status code.
9524 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9525 */
9526VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9527{
9528 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9529#ifdef LOG_ENABLED
9530 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9531#endif
9532
9533 /*
9534 * Do the decoding and emulation.
9535 */
9536 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9537 if (rcStrict == VINF_SUCCESS)
9538 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9539 else if (pVCpu->iem.s.cActiveMappings > 0)
9540 iemMemRollback(pVCpu);
9541
9542 if (rcStrict != VINF_SUCCESS)
9543 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9544 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9545 return rcStrict;
9546}
9547
9548
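/**
 * Executes one instruction and reports the number of bytes written.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pcbWritten  Where to return the number of bytes written by the
 *                      instruction (the delta of pVCpu->iem.s.cbWritten);
 *                      optional, can be NULL.
 */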
9549VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9550{
9551 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9552 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9553 if (rcStrict == VINF_SUCCESS)
9554 {
9555 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9556 if (pcbWritten)
9557 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9558 }
9559 else if (pVCpu->iem.s.cActiveMappings > 0)
9560 iemMemRollback(pVCpu);
9561
9562 return rcStrict;
9563}
9564
9565
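/**
 * Executes one instruction, reusing caller supplied opcode bytes when they
 * were fetched for the current instruction pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   OpcodeBytesPC   The guest RIP the opcode bytes correspond to; the bytes
 *                          are only used when this matches the current RIP.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes.
 */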
9566VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9567 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9568{
9569 VBOXSTRICTRC rcStrict;
9570 if ( cbOpcodeBytes
9571 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9572 {
9573 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9574#ifdef IEM_WITH_CODE_TLB
9575 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9576 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9577 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9578 pVCpu->iem.s.offCurInstrStart = 0;
9579 pVCpu->iem.s.offInstrNextByte = 0;
9580 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9581#else
9582 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9583 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9584#endif
9585 rcStrict = VINF_SUCCESS;
9586 }
9587 else
9588 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9589 if (rcStrict == VINF_SUCCESS)
9590 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9591 else if (pVCpu->iem.s.cActiveMappings > 0)
9592 iemMemRollback(pVCpu);
9593
9594 return rcStrict;
9595}
9596
9597
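/**
 * Executes one instruction with the access handlers bypassed
 * (IEM_F_BYPASS_HANDLERS), reporting the number of bytes written.  The
 * instruction following CLI / POP SS / MOV SS is not executed here.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pcbWritten  Where to return the number of bytes written by the
 *                      instruction; optional, can be NULL.
 */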
9598VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9599{
9600 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9601 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9602 if (rcStrict == VINF_SUCCESS)
9603 {
9604 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9605 if (pcbWritten)
9606 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9607 }
9608 else if (pVCpu->iem.s.cActiveMappings > 0)
9609 iemMemRollback(pVCpu);
9610
9611 return rcStrict;
9612}
9613
9614
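/**
 * Combination of IEMExecOneBypassEx and IEMExecOneWithPrefetchedByPC: executes
 * one instruction with the access handlers bypassed, reusing caller supplied
 * opcode bytes when they cover the current RIP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   OpcodeBytesPC   The guest RIP the opcode bytes correspond to.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes.
 */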
9615VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9616 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9617{
9618 VBOXSTRICTRC rcStrict;
9619 if ( cbOpcodeBytes
9620 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9621 {
9622 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9623#ifdef IEM_WITH_CODE_TLB
9624 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9625 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9626 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9627 pVCpu->iem.s.offCurInstrStart = 0;
9628 pVCpu->iem.s.offInstrNextByte = 0;
9629 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9630#else
9631 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9632 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9633#endif
9634 rcStrict = VINF_SUCCESS;
9635 }
9636 else
9637 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9638 if (rcStrict == VINF_SUCCESS)
9639 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9640 else if (pVCpu->iem.s.cActiveMappings > 0)
9641 iemMemRollback(pVCpu);
9642
9643 return rcStrict;
9644}
9645
9646
9647/**
9648 * For handling split cacheline lock operations when the host has split-lock
9649 * detection enabled.
9650 *
9651 * This will cause the interpreter to disregard the lock prefix and implicit
9652 * locking (xchg).
9653 *
9654 * @returns Strict VBox status code.
9655 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9656 */
9657VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9658{
9659 /*
9660 * Do the decoding and emulation.
9661 */
9662 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9663 if (rcStrict == VINF_SUCCESS)
9664 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9665 else if (pVCpu->iem.s.cActiveMappings > 0)
9666 iemMemRollback(pVCpu);
9667
9668 if (rcStrict != VINF_SUCCESS)
9669 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9670 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9671 return rcStrict;
9672}
9673
9674
9675/**
9676 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9677 * inject a pending TRPM trap.
9678 */
9679VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9680{
9681 Assert(TRPMHasTrap(pVCpu));
9682
9683 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9684 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9685 {
9686 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9687#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9688 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9689 if (fIntrEnabled)
9690 {
9691 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9692 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9693 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9694 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9695 else
9696 {
9697 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9698 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9699 }
9700 }
9701#else
9702 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9703#endif
9704 if (fIntrEnabled)
9705 {
9706 uint8_t u8TrapNo;
9707 TRPMEVENT enmType;
9708 uint32_t uErrCode;
9709 RTGCPTR uCr2;
9710 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9711 AssertRC(rc2);
9712 Assert(enmType == TRPM_HARDWARE_INT);
9713 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9714
9715 TRPMResetTrap(pVCpu);
9716
9717#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9718 /* Injecting an event may cause a VM-exit. */
9719 if ( rcStrict != VINF_SUCCESS
9720 && rcStrict != VINF_IEM_RAISED_XCPT)
9721 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9722#else
9723 NOREF(rcStrict);
9724#endif
9725 }
9726 }
9727
9728 return VINF_SUCCESS;
9729}
9730
9731
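/**
 * Executes instructions until a forced action, a pass-up status or the
 * instruction budget stops the loop.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @param   cMaxInstructions    Maximum number of instructions to execute; must be non-zero.
 * @param   cPollRate           Timer poll rate mask; must be a power of two minus one.
 *                              TMTimerPollBool is consulted whenever the instruction
 *                              countdown ANDed with this mask is zero.
 * @param   pcInstructions      Where to return the number of instructions executed;
 *                              optional, can be NULL.
 */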
9732VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9733{
9734 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9735 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9736 Assert(cMaxInstructions > 0);
9737
9738 /*
9739 * See if there is an interrupt pending in TRPM, inject it if we can.
9740 */
9741 /** @todo What if we are injecting an exception and not an interrupt? Is that
9742 * possible here? For now we assert it is indeed only an interrupt. */
9743 if (!TRPMHasTrap(pVCpu))
9744 { /* likely */ }
9745 else
9746 {
9747 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9748 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9749 { /*likely */ }
9750 else
9751 return rcStrict;
9752 }
9753
9754 /*
9755 * Initial decoder init w/ prefetch, then setup setjmp.
9756 */
9757 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9758 if (rcStrict == VINF_SUCCESS)
9759 {
9760#ifdef IEM_WITH_SETJMP
9761 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9762 IEM_TRY_SETJMP(pVCpu, rcStrict)
9763#endif
9764 {
9765 /*
9766 * The run loop, bounded by the caller's cMaxInstructions budget.
9767 */
9768 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9769 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9770 for (;;)
9771 {
9772 /*
9773 * Log the state.
9774 */
9775#ifdef LOG_ENABLED
9776 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9777#endif
9778
9779 /*
9780 * Do the decoding and emulation.
9781 */
9782 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9783 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9784#ifdef VBOX_STRICT
9785 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9786#endif
9787 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9788 {
9789 Assert(pVCpu->iem.s.cActiveMappings == 0);
9790 pVCpu->iem.s.cInstructions++;
9791
9792#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9793 /* Perform any VMX nested-guest instruction boundary actions. */
9794 uint64_t fCpu = pVCpu->fLocalForcedActions;
9795 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9796 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9797 { /* likely */ }
9798 else
9799 {
9800 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9801 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9802 fCpu = pVCpu->fLocalForcedActions;
9803 else
9804 {
9805 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9806 break;
9807 }
9808 }
9809#endif
9810 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9811 {
9812#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9813 uint64_t fCpu = pVCpu->fLocalForcedActions;
9814#endif
9815 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9816 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9817 | VMCPU_FF_TLB_FLUSH
9818 | VMCPU_FF_UNHALT );
9819
9820 if (RT_LIKELY( ( !fCpu
9821 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9822 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9823 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9824 {
9825 if (--cMaxInstructionsGccStupidity > 0)
9826 {
9827 /* Poll timers every now and then according to the caller's specs. */
9828 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9829 || !TMTimerPollBool(pVM, pVCpu))
9830 {
9831 Assert(pVCpu->iem.s.cActiveMappings == 0);
9832 iemReInitDecoder(pVCpu);
9833 continue;
9834 }
9835 }
9836 }
9837 }
9838 Assert(pVCpu->iem.s.cActiveMappings == 0);
9839 }
9840 else if (pVCpu->iem.s.cActiveMappings > 0)
9841 iemMemRollback(pVCpu);
9842 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9843 break;
9844 }
9845 }
9846#ifdef IEM_WITH_SETJMP
9847 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9848 {
9849 if (pVCpu->iem.s.cActiveMappings > 0)
9850 iemMemRollback(pVCpu);
9851# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9852 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9853# endif
9854 pVCpu->iem.s.cLongJumps++;
9855 }
9856 IEM_CATCH_LONGJMP_END(pVCpu);
9857#endif
9858
9859 /*
9860 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9861 */
9862 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9863 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9864 }
9865 else
9866 {
9867 if (pVCpu->iem.s.cActiveMappings > 0)
9868 iemMemRollback(pVCpu);
9869
9870#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9871 /*
9872 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9873 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9874 */
9875 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9876#endif
9877 }
9878
9879 /*
9880 * Maybe re-enter raw-mode and log.
9881 */
9882 if (rcStrict != VINF_SUCCESS)
9883 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9884 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9885 if (pcInstructions)
9886 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9887 return rcStrict;
9888}
9889
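/*
 * A minimal caller sketch for IEMExecLots.  The budget and poll mask values are
 * illustrative only; a real caller (e.g. the EM loop) picks its own:
 *
 *      uint32_t     cInstructions = 0;
 *      // 8192 instruction budget; poll timers roughly every 4096 instructions (mask 4095 = 2^12 - 1).
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 8192, 4095, &cInstructions);
 *      if (rcStrict != VINF_SUCCESS)
 *          // propagate the strict status code back to the caller's loop
 *          return rcStrict;
 */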
9890
9891/**
9892 * Interface used by EMExecuteExec, does exit statistics and limits.
9893 *
9894 * @returns Strict VBox status code.
9895 * @param pVCpu The cross context virtual CPU structure.
9896 * @param fWillExit To be defined.
9897 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9898 * @param cMaxInstructions Maximum number of instructions to execute.
9899 * @param cMaxInstructionsWithoutExits
9900 * The max number of instructions without exits.
9901 * @param pStats Where to return statistics.
9902 */
9903VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9904 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9905{
9906 NOREF(fWillExit); /** @todo define flexible exit crits */
9907
9908 /*
9909 * Initialize return stats.
9910 */
9911 pStats->cInstructions = 0;
9912 pStats->cExits = 0;
9913 pStats->cMaxExitDistance = 0;
9914 pStats->cReserved = 0;
9915
9916 /*
9917 * Initial decoder init w/ prefetch, then setup setjmp.
9918 */
9919 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9920 if (rcStrict == VINF_SUCCESS)
9921 {
9922#ifdef IEM_WITH_SETJMP
9923 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9924 IEM_TRY_SETJMP(pVCpu, rcStrict)
9925#endif
9926 {
9927#ifdef IN_RING0
9928 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9929#endif
9930 uint32_t cInstructionSinceLastExit = 0;
9931
9932 /*
9933 * The run loop, bounded by the caller's cMaxInstructions and cMaxInstructionsWithoutExits limits.
9934 */
9935 PVM pVM = pVCpu->CTX_SUFF(pVM);
9936 for (;;)
9937 {
9938 /*
9939 * Log the state.
9940 */
9941#ifdef LOG_ENABLED
9942 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9943#endif
9944
9945 /*
9946 * Do the decoding and emulation.
9947 */
9948 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9949
9950 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9951 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9952
9953 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9954 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9955 {
9956 pStats->cExits += 1;
9957 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9958 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9959 cInstructionSinceLastExit = 0;
9960 }
9961
9962 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9963 {
9964 Assert(pVCpu->iem.s.cActiveMappings == 0);
9965 pVCpu->iem.s.cInstructions++;
9966 pStats->cInstructions++;
9967 cInstructionSinceLastExit++;
9968
9969#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9970 /* Perform any VMX nested-guest instruction boundary actions. */
9971 uint64_t fCpu = pVCpu->fLocalForcedActions;
9972 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9973 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9974 { /* likely */ }
9975 else
9976 {
9977 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9978 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9979 fCpu = pVCpu->fLocalForcedActions;
9980 else
9981 {
9982 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9983 break;
9984 }
9985 }
9986#endif
9987 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9988 {
9989#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9990 uint64_t fCpu = pVCpu->fLocalForcedActions;
9991#endif
9992 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9993 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9994 | VMCPU_FF_TLB_FLUSH
9995 | VMCPU_FF_UNHALT );
9996 if (RT_LIKELY( ( ( !fCpu
9997 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9998 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9999 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10000 || pStats->cInstructions < cMinInstructions))
10001 {
10002 if (pStats->cInstructions < cMaxInstructions)
10003 {
10004 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10005 {
10006#ifdef IN_RING0
10007 if ( !fCheckPreemptionPending
10008 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10009#endif
10010 {
10011 Assert(pVCpu->iem.s.cActiveMappings == 0);
10012 iemReInitDecoder(pVCpu);
10013 continue;
10014 }
10015#ifdef IN_RING0
10016 rcStrict = VINF_EM_RAW_INTERRUPT;
10017 break;
10018#endif
10019 }
10020 }
10021 }
10022 Assert(!(fCpu & VMCPU_FF_IEM));
10023 }
10024 Assert(pVCpu->iem.s.cActiveMappings == 0);
10025 }
10026 else if (pVCpu->iem.s.cActiveMappings > 0)
10027 iemMemRollback(pVCpu);
10028 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10029 break;
10030 }
10031 }
10032#ifdef IEM_WITH_SETJMP
10033 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
10034 {
10035 if (pVCpu->iem.s.cActiveMappings > 0)
10036 iemMemRollback(pVCpu);
10037 pVCpu->iem.s.cLongJumps++;
10038 }
10039 IEM_CATCH_LONGJMP_END(pVCpu);
10040#endif
10041
10042 /*
10043 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10044 */
10045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10046 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10047 }
10048 else
10049 {
10050 if (pVCpu->iem.s.cActiveMappings > 0)
10051 iemMemRollback(pVCpu);
10052
10053#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10054 /*
10055 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10056 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10057 */
10058 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10059#endif
10060 }
10061
10062 /*
10063 * Maybe re-enter raw-mode and log.
10064 */
10065 if (rcStrict != VINF_SUCCESS)
10066 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10067 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10068 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10069 return rcStrict;
10070}
10071
10072
10073/**
10074 * Injects a trap, fault, abort, software interrupt or external interrupt.
10075 *
10076 * The parameter list matches TRPMQueryTrapAll pretty closely.
10077 *
10078 * @returns Strict VBox status code.
10079 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10080 * @param u8TrapNo The trap number.
10081 * @param enmType What type is it (trap/fault/abort), software
10082 * interrupt or hardware interrupt.
10083 * @param uErrCode The error code if applicable.
10084 * @param uCr2 The CR2 value if applicable.
10085 * @param cbInstr The instruction length (only relevant for
10086 * software interrupts).
10087 */
10088VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10089 uint8_t cbInstr)
10090{
10091 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
10092#ifdef DBGFTRACE_ENABLED
10093 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10094 u8TrapNo, enmType, uErrCode, uCr2);
10095#endif
10096
10097 uint32_t fFlags;
10098 switch (enmType)
10099 {
10100 case TRPM_HARDWARE_INT:
10101 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10102 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10103 uErrCode = uCr2 = 0;
10104 break;
10105
10106 case TRPM_SOFTWARE_INT:
10107 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10108 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10109 uErrCode = uCr2 = 0;
10110 break;
10111
10112 case TRPM_TRAP:
10113 case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
10114 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10115 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10116 if (u8TrapNo == X86_XCPT_PF)
10117 fFlags |= IEM_XCPT_FLAGS_CR2;
10118 switch (u8TrapNo)
10119 {
10120 case X86_XCPT_DF:
10121 case X86_XCPT_TS:
10122 case X86_XCPT_NP:
10123 case X86_XCPT_SS:
10124 case X86_XCPT_PF:
10125 case X86_XCPT_AC:
10126 case X86_XCPT_GP:
10127 fFlags |= IEM_XCPT_FLAGS_ERR;
10128 break;
10129 }
10130 break;
10131
10132 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10133 }
10134
10135 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10136
10137 if (pVCpu->iem.s.cActiveMappings > 0)
10138 iemMemRollback(pVCpu);
10139
10140 return rcStrict;
10141}
10142
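/*
 * Minimal usage sketches for IEMInjectTrap; the vector, error code and the
 * GCPtrFault variable are illustrative only:
 *
 *      // External hardware interrupt, vector 0x20.  The error code, CR2 and
 *      // instruction length arguments are ignored for TRPM_HARDWARE_INT.
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0, 0, 0);
 *
 *      // Page fault with error code; the CR2 argument is used for X86_XCPT_PF.
 *      rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0);
 */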
10143
10144/**
10145 * Injects the active TRPM event.
10146 *
10147 * @returns Strict VBox status code.
10148 * @param pVCpu The cross context virtual CPU structure.
10149 */
10150VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10151{
10152#ifndef IEM_IMPLEMENTS_TASKSWITCH
10153 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10154#else
10155 uint8_t u8TrapNo;
10156 TRPMEVENT enmType;
10157 uint32_t uErrCode;
10158 RTGCUINTPTR uCr2;
10159 uint8_t cbInstr;
10160 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10161 if (RT_FAILURE(rc))
10162 return rc;
10163
10164 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10165 * ICEBP \#DB injection as a special case. */
10166 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10167#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10168 if (rcStrict == VINF_SVM_VMEXIT)
10169 rcStrict = VINF_SUCCESS;
10170#endif
10171#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10172 if (rcStrict == VINF_VMX_VMEXIT)
10173 rcStrict = VINF_SUCCESS;
10174#endif
10175 /** @todo Are there any other codes that imply the event was successfully
10176 * delivered to the guest? See @bugref{6607}. */
10177 if ( rcStrict == VINF_SUCCESS
10178 || rcStrict == VINF_IEM_RAISED_XCPT)
10179 TRPMResetTrap(pVCpu);
10180
10181 return rcStrict;
10182#endif
10183}
10184
10185
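/**
 * Placeholder for setting an IEM breakpoint; currently not implemented.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   pVM     The cross context VM structure (unused).
 * @param   GCPtrBp The guest address to set the breakpoint at (unused).
 */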
10186VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10187{
10188 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10189 return VERR_NOT_IMPLEMENTED;
10190}
10191
10192
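/**
 * Placeholder for clearing an IEM breakpoint; currently not implemented.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   pVM     The cross context VM structure (unused).
 * @param   GCPtrBp The guest address of the breakpoint (unused).
 */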
10193VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10194{
10195 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10196 return VERR_NOT_IMPLEMENTED;
10197}
10198
10199
10200/**
10201 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10202 *
10203 * This API ASSUMES that the caller has already verified that the guest code is
10204 * allowed to access the I/O port. (The I/O port is in the DX register in the
10205 * guest state.)
10206 *
10207 * @returns Strict VBox status code.
10208 * @param pVCpu The cross context virtual CPU structure.
10209 * @param cbValue The size of the I/O port access (1, 2, or 4).
10210 * @param enmAddrMode The addressing mode.
10211 * @param fRepPrefix Indicates whether a repeat prefix is used
10212 * (doesn't matter which for this instruction).
10213 * @param cbInstr The instruction length in bytes.
10214 * @param iEffSeg The effective segment address.
10215 * @param fIoChecked Whether the access to the I/O port has been
10216 * checked or not. It's typically checked in the
10217 * HM scenario.
10218 */
10219VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10220 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10221{
10222 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10223 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10224
10225 /*
10226 * State init.
10227 */
10228 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10229
10230 /*
10231 * Switch orgy for getting to the right handler.
10232 */
10233 VBOXSTRICTRC rcStrict;
10234 if (fRepPrefix)
10235 {
10236 switch (enmAddrMode)
10237 {
10238 case IEMMODE_16BIT:
10239 switch (cbValue)
10240 {
10241 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10242 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10243 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10244 default:
10245 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10246 }
10247 break;
10248
10249 case IEMMODE_32BIT:
10250 switch (cbValue)
10251 {
10252 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10253 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10254 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10255 default:
10256 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10257 }
10258 break;
10259
10260 case IEMMODE_64BIT:
10261 switch (cbValue)
10262 {
10263 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10264 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10265 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10266 default:
10267 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10268 }
10269 break;
10270
10271 default:
10272 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10273 }
10274 }
10275 else
10276 {
10277 switch (enmAddrMode)
10278 {
10279 case IEMMODE_16BIT:
10280 switch (cbValue)
10281 {
10282 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10283 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10284 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10285 default:
10286 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10287 }
10288 break;
10289
10290 case IEMMODE_32BIT:
10291 switch (cbValue)
10292 {
10293 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10294 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10295 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10296 default:
10297 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10298 }
10299 break;
10300
10301 case IEMMODE_64BIT:
10302 switch (cbValue)
10303 {
10304 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10305 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10306 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10307 default:
10308 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10309 }
10310 break;
10311
10312 default:
10313 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10314 }
10315 }
10316
10317 if (pVCpu->iem.s.cActiveMappings)
10318 iemMemRollback(pVCpu);
10319
10320 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10321}
10322
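/*
 * A minimal caller sketch for IEMExecStringIoWrite.  The values are illustrative;
 * a real HM/EM caller derives them from its exit information and must already
 * have verified the guest's access to the I/O port in DX:
 *
 *      // cbValue=1 (byte), 32-bit address mode, rep prefix, cbInstr=2 (e.g. REP OUTSB),
 *      // DS as the effective segment, I/O access already checked by the caller.
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT, true, 2, X86_SREG_DS, true);
 */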
10323
10324/**
10325 * Interface for HM and EM for executing string I/O IN (read) instructions.
10326 *
10327 * This API ASSUMES that the caller has already verified that the guest code is
10328 * allowed to access the I/O port. (The I/O port is in the DX register in the
10329 * guest state.)
10330 *
10331 * @returns Strict VBox status code.
10332 * @param pVCpu The cross context virtual CPU structure.
10333 * @param cbValue The size of the I/O port access (1, 2, or 4).
10334 * @param enmAddrMode The addressing mode.
10335 * @param fRepPrefix Indicates whether a repeat prefix is used
10336 * (doesn't matter which for this instruction).
10337 * @param cbInstr The instruction length in bytes.
10338 * @param fIoChecked Whether the access to the I/O port has been
10339 * checked or not. It's typically checked in the
10340 * HM scenario.
10341 */
10342VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10343 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10344{
10345 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10346
10347 /*
10348 * State init.
10349 */
10350 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10351
10352 /*
10353 * Switch orgy for getting to the right handler.
10354 */
10355 VBOXSTRICTRC rcStrict;
10356 if (fRepPrefix)
10357 {
10358 switch (enmAddrMode)
10359 {
10360 case IEMMODE_16BIT:
10361 switch (cbValue)
10362 {
10363 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10364 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10365 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10366 default:
10367 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10368 }
10369 break;
10370
10371 case IEMMODE_32BIT:
10372 switch (cbValue)
10373 {
10374 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10375 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10376 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10377 default:
10378 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10379 }
10380 break;
10381
10382 case IEMMODE_64BIT:
10383 switch (cbValue)
10384 {
10385 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10386 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10387 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10388 default:
10389 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10390 }
10391 break;
10392
10393 default:
10394 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10395 }
10396 }
10397 else
10398 {
10399 switch (enmAddrMode)
10400 {
10401 case IEMMODE_16BIT:
10402 switch (cbValue)
10403 {
10404 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10405 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10406 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10407 default:
10408 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10409 }
10410 break;
10411
10412 case IEMMODE_32BIT:
10413 switch (cbValue)
10414 {
10415 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10416 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10417 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10418 default:
10419 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10420 }
10421 break;
10422
10423 case IEMMODE_64BIT:
10424 switch (cbValue)
10425 {
10426 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10427 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10428 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10429 default:
10430 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10431 }
10432 break;
10433
10434 default:
10435 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10436 }
10437 }
10438
10439 if ( pVCpu->iem.s.cActiveMappings == 0
10440 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10441 { /* likely */ }
10442 else
10443 {
10444 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10445 iemMemRollback(pVCpu);
10446 }
10447 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10448}
10449
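/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how a hypothetical HM exit handler might forward a decoded string I/O read
 * to the interface above.  cbAccess, enmAddrMode and fRep are assumed to have
 * been extracted from the exit information by the caller; the final 'false'
 * is fIoChecked, i.e. the port access permission has not been pre-checked.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu, cbAccess, enmAddrMode,
 *                                                 fRep, cbInstr, false);
 * @endcode
 */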
10450
10451/**
10452 * Interface for rawmode to execute an OUT instruction.
10453 *
10454 * @returns Strict VBox status code.
10455 * @param pVCpu The cross context virtual CPU structure.
10456 * @param cbInstr The instruction length in bytes.
10457 * @param   u16Port     The port to write to.
10458 * @param fImm Whether the port is specified using an immediate operand or
10459 * using the implicit DX register.
10460 * @param cbReg The register size.
10461 *
10462 * @remarks In ring-0 not all of the state needs to be synced in.
10463 */
10464VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10465{
10466 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10467 Assert(cbReg <= 4 && cbReg != 3);
10468
10469 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10470 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10471 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10472 Assert(!pVCpu->iem.s.cActiveMappings);
10473 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10474}
10475
10476
10477/**
10478 * Interface for rawmode to execute an IN instruction.
10479 *
10480 * @returns Strict VBox status code.
10481 * @param pVCpu The cross context virtual CPU structure.
10482 * @param cbInstr The instruction length in bytes.
10483 * @param u16Port The port to read.
10484 * @param fImm Whether the port is specified using an immediate operand or
10485 * using the implicit DX.
10486 * @param cbReg The register size.
10487 */
10488VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10489{
10490 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10491 Assert(cbReg <= 4 && cbReg != 3);
10492
10493 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10494 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10495 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10496 Assert(!pVCpu->iem.s.cActiveMappings);
10497 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10498}
10499
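/*
 * Illustrative sketch (editor's addition): dispatching an already decoded port
 * I/O intercept to the two interfaces above.  fIsWrite, fPortIsImm and cbAccess
 * are hypothetical inputs describing the intercepted instruction.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = fIsWrite
 *                           ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, fPortIsImm, cbAccess)
 *                           : IEMExecDecodedIn( pVCpu, cbInstr, u16Port, fPortIsImm, cbAccess);
 * @endcode
 */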
10500
10501/**
10502 * Interface for HM and EM to write to a CRx register.
10503 *
10504 * @returns Strict VBox status code.
10505 * @param pVCpu The cross context virtual CPU structure.
10506 * @param cbInstr The instruction length in bytes.
10507 * @param iCrReg The control register number (destination).
10508 * @param iGReg The general purpose register number (source).
10509 *
10510 * @remarks In ring-0 not all of the state needs to be synced in.
10511 */
10512VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10513{
10514 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10515 Assert(iCrReg < 16);
10516 Assert(iGReg < 16);
10517
10518 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10519 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10520 Assert(!pVCpu->iem.s.cActiveMappings);
10521 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10522}
10523
10524
10525/**
10526 * Interface for HM and EM to read from a CRx register.
10527 *
10528 * @returns Strict VBox status code.
10529 * @param pVCpu The cross context virtual CPU structure.
10530 * @param cbInstr The instruction length in bytes.
10531 * @param iGReg The general purpose register number (destination).
10532 * @param iCrReg The control register number (source).
10533 *
10534 * @remarks In ring-0 not all of the state needs to be synced in.
10535 */
10536VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10537{
10538 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10539 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10540 | CPUMCTX_EXTRN_APIC_TPR);
10541 Assert(iCrReg < 16);
10542 Assert(iGReg < 16);
10543
10544 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10545 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10546 Assert(!pVCpu->iem.s.cActiveMappings);
10547 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10548}
10549
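/*
 * Illustrative sketch (editor's addition): a hypothetical CRx access intercept
 * handler forwarding the decoded MOV to/from a control register.  Note the
 * swapped argument order between the write (iCrReg, iGReg) and read
 * (iGReg, iCrReg) interfaces above; iCrReg, iGReg and fWrite are assumed to
 * come from the exit qualification.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = fWrite
 *                           ? IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg)
 *                           : IEMExecDecodedMovCRxRead( pVCpu, cbInstr, iGReg, iCrReg);
 * @endcode
 */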
10550
10551/**
10552 * Interface for HM and EM to write to a DRx register.
10553 *
10554 * @returns Strict VBox status code.
10555 * @param pVCpu The cross context virtual CPU structure.
10556 * @param cbInstr The instruction length in bytes.
10557 * @param iDrReg The debug register number (destination).
10558 * @param iGReg The general purpose register number (source).
10559 *
10560 * @remarks In ring-0 not all of the state needs to be synced in.
10561 */
10562VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10563{
10564 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10565 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10566 Assert(iDrReg < 8);
10567 Assert(iGReg < 16);
10568
10569 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10570 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10571 Assert(!pVCpu->iem.s.cActiveMappings);
10572 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10573}
10574
10575
10576/**
10577 * Interface for HM and EM to read from a DRx register.
10578 *
10579 * @returns Strict VBox status code.
10580 * @param pVCpu The cross context virtual CPU structure.
10581 * @param cbInstr The instruction length in bytes.
10582 * @param iGReg The general purpose register number (destination).
10583 * @param iDrReg The debug register number (source).
10584 *
10585 * @remarks In ring-0 not all of the state needs to be synced in.
10586 */
10587VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10588{
10589 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10590 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10591 Assert(iDrReg < 8);
10592 Assert(iGReg < 16);
10593
10594 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10595 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10596 Assert(!pVCpu->iem.s.cActiveMappings);
10597 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10598}
10599
10600
10601/**
10602 * Interface for HM and EM to clear the CR0[TS] bit.
10603 *
10604 * @returns Strict VBox status code.
10605 * @param pVCpu The cross context virtual CPU structure.
10606 * @param cbInstr The instruction length in bytes.
10607 *
10608 * @remarks In ring-0 not all of the state needs to be synced in.
10609 */
10610VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10611{
10612 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10613
10614 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10615 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10616 Assert(!pVCpu->iem.s.cActiveMappings);
10617 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10618}
10619
10620
10621/**
10622 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10623 *
10624 * @returns Strict VBox status code.
10625 * @param pVCpu The cross context virtual CPU structure.
10626 * @param cbInstr The instruction length in bytes.
10627 * @param uValue The value to load into CR0.
10628 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10629 * memory operand. Otherwise pass NIL_RTGCPTR.
10630 *
10631 * @remarks In ring-0 not all of the state needs to be synced in.
10632 */
10633VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10634{
10635 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10636
10637 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10638 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10639 Assert(!pVCpu->iem.s.cActiveMappings);
10640 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10641}
10642
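/*
 * Illustrative sketch (editor's addition): the register form of LMSW has no
 * memory operand, so NIL_RTGCPTR is passed for GCPtrEffDst as described above.
 * uMswValue is an assumed name for the decoded 16-bit source operand.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMswValue, NIL_RTGCPTR);
 * @endcode
 */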
10643
10644/**
10645 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10646 *
10647 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10648 *
10649 * @returns Strict VBox status code.
10650 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10651 * @param cbInstr The instruction length in bytes.
10652 * @remarks In ring-0 not all of the state needs to be synced in.
10653 * @thread EMT(pVCpu)
10654 */
10655VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10656{
10657 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10658
10659 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10660 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10661 Assert(!pVCpu->iem.s.cActiveMappings);
10662 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10663}
10664
10665
10666/**
10667 * Interface for HM and EM to emulate the WBINVD instruction.
10668 *
10669 * @returns Strict VBox status code.
10670 * @param pVCpu The cross context virtual CPU structure.
10671 * @param cbInstr The instruction length in bytes.
10672 *
10673 * @remarks In ring-0 not all of the state needs to be synced in.
10674 */
10675VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10676{
10677 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10678
10679 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10680 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10681 Assert(!pVCpu->iem.s.cActiveMappings);
10682 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10683}
10684
10685
10686/**
10687 * Interface for HM and EM to emulate the INVD instruction.
10688 *
10689 * @returns Strict VBox status code.
10690 * @param pVCpu The cross context virtual CPU structure.
10691 * @param cbInstr The instruction length in bytes.
10692 *
10693 * @remarks In ring-0 not all of the state needs to be synced in.
10694 */
10695VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10696{
10697 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10698
10699 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10700 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10701 Assert(!pVCpu->iem.s.cActiveMappings);
10702 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10703}
10704
10705
10706/**
10707 * Interface for HM and EM to emulate the INVLPG instruction.
10708 *
10709 * @returns Strict VBox status code.
10710 * @retval VINF_PGM_SYNC_CR3
10711 *
10712 * @param pVCpu The cross context virtual CPU structure.
10713 * @param cbInstr The instruction length in bytes.
10714 * @param GCPtrPage The effective address of the page to invalidate.
10715 *
10716 * @remarks In ring-0 not all of the state needs to be synced in.
10717 */
10718VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10719{
10720 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10721
10722 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10723 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10724 Assert(!pVCpu->iem.s.cActiveMappings);
10725 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10726}
10727
10728
10729/**
10730 * Interface for HM and EM to emulate the INVPCID instruction.
10731 *
10732 * @returns Strict VBox status code.
10733 * @retval VINF_PGM_SYNC_CR3
10734 *
10735 * @param pVCpu The cross context virtual CPU structure.
10736 * @param cbInstr The instruction length in bytes.
10737 * @param iEffSeg The effective segment register.
10738 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10739 * @param uType The invalidation type.
10740 *
10741 * @remarks In ring-0 not all of the state needs to be synced in.
10742 */
10743VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10744 uint64_t uType)
10745{
10746 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10747
10748 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10749 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10750 Assert(!pVCpu->iem.s.cActiveMappings);
10751 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10752}
10753
10754
10755/**
10756 * Interface for HM and EM to emulate the CPUID instruction.
10757 *
10758 * @returns Strict VBox status code.
10759 *
10760 * @param pVCpu The cross context virtual CPU structure.
10761 * @param cbInstr The instruction length in bytes.
10762 *
10763 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
10764 */
10765VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10766{
10767 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10768 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10769
10770 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10771 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10772 Assert(!pVCpu->iem.s.cActiveMappings);
10773 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10774}
10775
10776
10777/**
10778 * Interface for HM and EM to emulate the RDPMC instruction.
10779 *
10780 * @returns Strict VBox status code.
10781 *
10782 * @param pVCpu The cross context virtual CPU structure.
10783 * @param cbInstr The instruction length in bytes.
10784 *
10785 * @remarks Not all of the state needs to be synced in.
10786 */
10787VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10788{
10789 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10790 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10791
10792 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10793 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10794 Assert(!pVCpu->iem.s.cActiveMappings);
10795 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10796}
10797
10798
10799/**
10800 * Interface for HM and EM to emulate the RDTSC instruction.
10801 *
10802 * @returns Strict VBox status code.
10803 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10804 *
10805 * @param pVCpu The cross context virtual CPU structure.
10806 * @param cbInstr The instruction length in bytes.
10807 *
10808 * @remarks Not all of the state needs to be synced in.
10809 */
10810VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10811{
10812 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10813 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10814
10815 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10816 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10817 Assert(!pVCpu->iem.s.cActiveMappings);
10818 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10819}
10820
10821
10822/**
10823 * Interface for HM and EM to emulate the RDTSCP instruction.
10824 *
10825 * @returns Strict VBox status code.
10826 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10827 *
10828 * @param pVCpu The cross context virtual CPU structure.
10829 * @param cbInstr The instruction length in bytes.
10830 *
10831 * @remarks Not all of the state needs to be synced in.  Recommended to
10832 *          include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10833 */
10834VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10835{
10836 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10837 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10838
10839 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10840 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10841 Assert(!pVCpu->iem.s.cActiveMappings);
10842 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10843}
10844
10845
10846/**
10847 * Interface for HM and EM to emulate the RDMSR instruction.
10848 *
10849 * @returns Strict VBox status code.
10850 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10851 *
10852 * @param pVCpu The cross context virtual CPU structure.
10853 * @param cbInstr The instruction length in bytes.
10854 *
10855 * @remarks Not all of the state needs to be synced in. Requires RCX and
10856 * (currently) all MSRs.
10857 */
10858VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10859{
10860 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10861 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10862
10863 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10864 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10865 Assert(!pVCpu->iem.s.cActiveMappings);
10866 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10867}
10868
10869
10870/**
10871 * Interface for HM and EM to emulate the WRMSR instruction.
10872 *
10873 * @returns Strict VBox status code.
10874 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10875 *
10876 * @param pVCpu The cross context virtual CPU structure.
10877 * @param cbInstr The instruction length in bytes.
10878 *
10879 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10880 * and (currently) all MSRs.
10881 */
10882VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10883{
10884 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10885 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10886 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10887
10888 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10889 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10890 Assert(!pVCpu->iem.s.cActiveMappings);
10891 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10892}
10893
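/*
 * Illustrative sketch (editor's addition): a hypothetical MSR intercept handler
 * using the two interfaces above.  Per the remarks, RCX (plus RAX and RDX for
 * writes) and currently all MSRs must already be available in the CPU context.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = fIsWrite
 *                           ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
 *                           : IEMExecDecodedRdmsr(pVCpu, cbInstr);
 * @endcode
 */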
10894
10895/**
10896 * Interface for HM and EM to emulate the MONITOR instruction.
10897 *
10898 * @returns Strict VBox status code.
10899 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10900 *
10901 * @param pVCpu The cross context virtual CPU structure.
10902 * @param cbInstr The instruction length in bytes.
10903 *
10904 * @remarks Not all of the state needs to be synced in.
10905 * @remarks ASSUMES the default segment of DS and no segment override prefixes
10906 * are used.
10907 */
10908VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10909{
10910 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10911 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10912
10913 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10914 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10915 Assert(!pVCpu->iem.s.cActiveMappings);
10916 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10917}
10918
10919
10920/**
10921 * Interface for HM and EM to emulate the MWAIT instruction.
10922 *
10923 * @returns Strict VBox status code.
10924 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10925 *
10926 * @param pVCpu The cross context virtual CPU structure.
10927 * @param cbInstr The instruction length in bytes.
10928 *
10929 * @remarks Not all of the state needs to be synced in.
10930 */
10931VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10932{
10933 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10934 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10935
10936 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10937 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10938 Assert(!pVCpu->iem.s.cActiveMappings);
10939 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10940}
10941
10942
10943/**
10944 * Interface for HM and EM to emulate the HLT instruction.
10945 *
10946 * @returns Strict VBox status code.
10947 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10948 *
10949 * @param pVCpu The cross context virtual CPU structure.
10950 * @param cbInstr The instruction length in bytes.
10951 *
10952 * @remarks Not all of the state needs to be synced in.
10953 */
10954VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10955{
10956 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10957
10958 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10959 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10960 Assert(!pVCpu->iem.s.cActiveMappings);
10961 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10962}
10963
10964
10965/**
10966 * Checks if IEM is in the process of delivering an event (interrupt or
10967 * exception).
10968 *
10969 * @returns true if we're in the process of raising an interrupt or exception,
10970 * false otherwise.
10971 * @param pVCpu The cross context virtual CPU structure.
10972 * @param puVector Where to store the vector associated with the
10973 * currently delivered event, optional.
10974 * @param   pfFlags         Where to store the event delivery flags (see
10975 * IEM_XCPT_FLAGS_XXX), optional.
10976 * @param puErr Where to store the error code associated with the
10977 * event, optional.
10978 * @param puCr2 Where to store the CR2 associated with the event,
10979 * optional.
10980 * @remarks The caller should check the flags to determine if the error code and
10981 * CR2 are valid for the event.
10982 */
10983VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10984{
10985 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10986 if (fRaisingXcpt)
10987 {
10988 if (puVector)
10989 *puVector = pVCpu->iem.s.uCurXcpt;
10990 if (pfFlags)
10991 *pfFlags = pVCpu->iem.s.fCurXcpt;
10992 if (puErr)
10993 *puErr = pVCpu->iem.s.uCurXcptErr;
10994 if (puCr2)
10995 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10996 }
10997 return fRaisingXcpt;
10998}
10999
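/*
 * Illustrative sketch (editor's addition): checking the returned flags before
 * trusting the error code and CR2, as the remarks above recommend.  The
 * IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 bits are assumed to be the
 * relevant members of the IEM_XCPT_FLAGS_XXX set.
 *
 * @code
 *     uint8_t  uVector;
 *     uint32_t fFlags, uErr;
 *     uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *     {
 *         bool const fErrValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *         bool const fCr2Valid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *         // ... log or reinject the event using only the fields marked valid ...
 *     }
 * @endcode
 */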
11000#ifdef IN_RING3
11001
11002/**
11003 * Handles the unlikely and probably fatal merge cases.
11004 *
11005 * @returns Merged status code.
11006 * @param rcStrict Current EM status code.
11007 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11008 * with @a rcStrict.
11009 * @param iMemMap The memory mapping index. For error reporting only.
11010 * @param pVCpu The cross context virtual CPU structure of the calling
11011 * thread, for error reporting only.
11012 */
11013DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11014 unsigned iMemMap, PVMCPUCC pVCpu)
11015{
11016 if (RT_FAILURE_NP(rcStrict))
11017 return rcStrict;
11018
11019 if (RT_FAILURE_NP(rcStrictCommit))
11020 return rcStrictCommit;
11021
11022 if (rcStrict == rcStrictCommit)
11023 return rcStrictCommit;
11024
11025 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11026 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11027 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11028 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11029 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11030 return VERR_IOM_FF_STATUS_IPE;
11031}
11032
11033
11034/**
11035 * Helper for IOMR3ProcessForceFlag.
11036 *
11037 * @returns Merged status code.
11038 * @param rcStrict Current EM status code.
11039 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11040 * with @a rcStrict.
11041 * @param iMemMap The memory mapping index. For error reporting only.
11042 * @param pVCpu The cross context virtual CPU structure of the calling
11043 * thread, for error reporting only.
11044 */
11045DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11046{
11047 /* Simple. */
11048 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11049 return rcStrictCommit;
11050
11051 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11052 return rcStrict;
11053
11054 /* EM scheduling status codes. */
11055 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11056 && rcStrict <= VINF_EM_LAST))
11057 {
11058 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11059 && rcStrictCommit <= VINF_EM_LAST))
11060 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11061 }
11062
11063 /* Unlikely */
11064 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11065}
11066
11067
11068/**
11069 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11070 *
11071 * @returns Merge between @a rcStrict and what the commit operation returned.
11072 * @param pVM The cross context VM structure.
11073 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11074 * @param rcStrict The status code returned by ring-0 or raw-mode.
11075 */
11076VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11077{
11078 /*
11079 * Reset the pending commit.
11080 */
11081 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11082 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11083 ("%#x %#x %#x\n",
11084 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11085 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11086
11087 /*
11088 * Commit the pending bounce buffers (usually just one).
11089 */
11090 unsigned cBufs = 0;
11091 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11092 while (iMemMap-- > 0)
11093 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11094 {
11095 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11096 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11097 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11098
11099 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11100 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11101 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11102
11103 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11104 {
11105 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11106 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11107 pbBuf,
11108 cbFirst,
11109 PGMACCESSORIGIN_IEM);
11110 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11111 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11112 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11113 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11114 }
11115
11116 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11117 {
11118 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11119 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11120 pbBuf + cbFirst,
11121 cbSecond,
11122 PGMACCESSORIGIN_IEM);
11123 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11124 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11125 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11126 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11127 }
11128 cBufs++;
11129 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11130 }
11131
11132 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11133 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11134 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11135 pVCpu->iem.s.cActiveMappings = 0;
11136 return rcStrict;
11137}
11138
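/*
 * Illustrative sketch (editor's addition): how ring-3 force-flag processing
 * might hand a pending bounce-buffer commit back to the function above.  The
 * actual caller lives in the EM/IOM force-flag handling; this only shows the
 * calling pattern.
 *
 * @code
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */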
11139#endif /* IN_RING3 */
11140