VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 103951

Last change on this file since 103951 was 103951, checked in by vboxsync, 2 months ago

VMM/IEM: Rename iemMemFetchDataU256AlignedSse(Jmp) to iemMemFetchDataU256AlignedAvx(Jmp) and adjust the microcode statements, convert the code to be instantiated by the memory RW templates, bugref:10614

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 438.6 KB
Line 
1/* $Id: IEMAll.cpp 103951 2024-03-20 11:59:02Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead the IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the "IEM" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The "IEM_MEM" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_BRK_PENDING_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
255
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
344
345
346/**
347 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetch opcodes the first time when starting executing.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, rc, cbToTryRead));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
639
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalid both TLBs slow fashion following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it give an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
746
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 * @param enmReason The reason we're called.
757 *
758 * @remarks Caller holds the PGM lock.
759 */
760VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
761{
762#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
763 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
764 if (pVCpuCaller)
765 VMCPU_ASSERT_EMT(pVCpuCaller);
766 Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
767
768 VMCC_FOR_EACH_VMCPU(pVM)
769 {
770# ifdef IEM_WITH_CODE_TLB
771 if (pVCpuCaller == pVCpu)
772 pVCpu->iem.s.cbInstrBufTotal = 0;
773# endif
774
775 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
776 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
777 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
778 { /* likely */}
779 else if (pVCpuCaller != pVCpu)
780 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
781 else
782 {
783 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
784 continue;
785 }
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
788 }
789 VMCC_FOR_EACH_VMCPU_END(pVM);
790
791#else
792 RT_NOREF(pVM, idCpuCaller, enmReason);
793#endif
794}
795
796
797/**
798 * Flushes the prefetch buffer, light version.
799 */
800void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
801{
802#ifndef IEM_WITH_CODE_TLB
803 pVCpu->iem.s.cbOpcode = cbInstr;
804#else
805 RT_NOREF(pVCpu, cbInstr);
806#endif
807}
808
809
810/**
811 * Flushes the prefetch buffer, heavy version.
812 */
813void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
814{
815#ifndef IEM_WITH_CODE_TLB
816 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
817#elif 1
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetches @a cbDst opcode bytes, raise the appropriate exception on
831 * failure and jumps.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundrary (e.g. cross page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 * - TLB loading in the recompiler (@a pvDst = NULL, @a cbDst = 0).
839 *
840 * @param pVCpu The cross context virtual CPU structure of the
841 * calling thread.
842 * @param pvDst Where to return the bytes.
843 * @param cbDst Number of bytes to read. A value of zero is
844 * allowed for initializing pbInstrBuf (the
845 * recompiler does this). In this case it is best
846 * to set pbInstrBuf to NULL prior to the call.
847 */
848void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
849{
850# ifdef IN_RING3
851 for (;;)
852 {
853 Assert(cbDst <= 8);
854 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
855
856 /*
857 * We might have a partial buffer match, deal with that first to make the
858 * rest simpler. This is the first part of the cross page/buffer case.
859 */
860 uint8_t const * const pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
861 if (pbInstrBuf != NULL)
862 {
863 Assert(cbDst != 0); /* pbInstrBuf shall be NULL in case of a TLB load */
864 uint32_t const cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
865 if (offBuf < cbInstrBuf)
866 {
867 Assert(offBuf + cbDst > cbInstrBuf);
868 uint32_t const cbCopy = cbInstrBuf - offBuf;
869 memcpy(pvDst, &pbInstrBuf[offBuf], cbCopy);
870
871 cbDst -= cbCopy;
872 pvDst = (uint8_t *)pvDst + cbCopy;
873 offBuf += cbCopy;
874 }
875 }
876
877 /*
878 * Check segment limit, figuring how much we're allowed to access at this point.
879 *
880 * We will fault immediately if RIP is past the segment limit / in non-canonical
881 * territory. If we do continue, there are one or more bytes to read before we
882 * end up in trouble and we need to do that first before faulting.
883 */
884 RTGCPTR GCPtrFirst;
885 uint32_t cbMaxRead;
886 if (IEM_IS_64BIT_CODE(pVCpu))
887 {
888 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
889 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
890 { /* likely */ }
891 else
892 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
893 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
894 }
895 else
896 {
897 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
898 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
899 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
900 { /* likely */ }
901 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
902 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
903 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
904 if (cbMaxRead != 0)
905 { /* likely */ }
906 else
907 {
908 /* Overflowed because address is 0 and limit is max. */
909 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
910 cbMaxRead = X86_PAGE_SIZE;
911 }
912 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
913 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
914 if (cbMaxRead2 < cbMaxRead)
915 cbMaxRead = cbMaxRead2;
916 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
917 }
918
919 /*
920 * Get the TLB entry for this piece of code.
921 */
922 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
923 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
924 if (pTlbe->uTag == uTag)
925 {
926 /* likely when executing lots of code, otherwise unlikely */
927# ifdef VBOX_WITH_STATISTICS
928 pVCpu->iem.s.CodeTlb.cTlbHits++;
929# endif
930 }
931 else
932 {
933 pVCpu->iem.s.CodeTlb.cTlbMisses++;
934 PGMPTWALK Walk;
935 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
936 if (RT_FAILURE(rc))
937 {
938#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
939 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
940 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
941#endif
942 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
943 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
944 }
945
946 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
947 Assert(Walk.fSucceeded);
948 pTlbe->uTag = uTag;
949 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
950 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
951 pTlbe->GCPhys = Walk.GCPhys;
952 pTlbe->pbMappingR3 = NULL;
953 }
954
955 /*
956 * Check TLB page table level access flags.
957 */
958 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
959 {
960 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
961 {
962 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
963 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
964 }
965 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
966 {
967 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
968 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
969 }
970 }
971
972 /*
973 * Set the accessed flags.
974 * ASSUMES this is set when the address is translated rather than on commit...
975 */
976 /** @todo testcase: check when the A bit are actually set by the CPU for code. */
977 if (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED)
978 {
979 int rc2 = PGMGstModifyPage(pVCpu, GCPtrFirst, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
980 AssertRC(rc2);
981 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
982 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
983 pTlbe->fFlagsAndPhysRev &= ~IEMTLBE_F_PT_NO_ACCESSED;
984 }
985
986 /*
987 * Look up the physical page info if necessary.
988 */
989 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
990 { /* not necessary */ }
991 else
992 {
993 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
994 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
995 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
996 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
997 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
998 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
999 { /* likely */ }
1000 else
1001 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
1002 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1003 | IEMTLBE_F_NO_MAPPINGR3
1004 | IEMTLBE_F_PG_NO_READ
1005 | IEMTLBE_F_PG_NO_WRITE
1006 | IEMTLBE_F_PG_UNASSIGNED
1007 | IEMTLBE_F_PG_CODE_PAGE);
1008 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1009 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1010 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
1011 }
1012
1013# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1014 /*
1015 * Try do a direct read using the pbMappingR3 pointer.
1016 */
1017 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1018 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1019 {
1020 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1021 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1022 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1023 {
1024 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1025 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1026 }
1027 else
1028 {
1029 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1030 if (cbInstr + (uint32_t)cbDst <= 15)
1031 {
1032 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1033 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1034 }
1035 else
1036 {
1037 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1038 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1039 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1040 }
1041 }
1042 if (cbDst <= cbMaxRead)
1043 {
1044 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1045 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1046
1047 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1048 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1049 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1050 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1051 if (cbDst > 0) /* To make ASAN happy in the TLB load case. */
1052 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1053 else
1054 Assert(!pvDst);
1055 return;
1056 }
1057 pVCpu->iem.s.pbInstrBuf = NULL;
1058
1059 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1060 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1061 }
1062# else
1063# error "refactor as needed"
1064 /*
1065 * If there is no special read handling, so we can read a bit more and
1066 * put it in the prefetch buffer.
1067 */
1068 if ( cbDst < cbMaxRead
1069 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1070 {
1071 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1072 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1073 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1074 { /* likely */ }
1075 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1076 {
1077 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1078 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1079 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1080 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICRC_VAL(rcStrict)));
1081 }
1082 else
1083 {
1084 Log((RT_SUCCESS(rcStrict)
1085 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1086 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1087 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1088 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1089 }
1090 }
1091# endif
1092 /*
1093 * Special read handling, so only read exactly what's needed.
1094 * This is a highly unlikely scenario.
1095 */
1096 else
1097 {
1098 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1099
1100 /* Check instruction length. */
1101 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1102 if (RT_LIKELY(cbInstr + cbDst <= 15))
1103 { /* likely */ }
1104 else
1105 {
1106 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1107 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1108 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1109 }
1110
1111 /* Do the reading. */
1112 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1113 if (cbToRead > 0)
1114 {
1115 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1116 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1117 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1118 { /* likely */ }
1119 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1120 {
1121 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1122 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1123 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1124 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1125 }
1126 else
1127 {
1128 Log((RT_SUCCESS(rcStrict)
1129 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1130 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1131 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1132 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1133 }
1134 }
1135
1136 /* Update the state and probably return. */
1137 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1138 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1139 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1140
1141 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1142 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1143 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1144 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1145 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1146 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1147 pVCpu->iem.s.pbInstrBuf = NULL;
1148 if (cbToRead == cbDst)
1149 return;
1150 Assert(cbToRead == cbMaxRead);
1151 }
1152
1153 /*
1154 * More to read, loop.
1155 */
1156 cbDst -= cbMaxRead;
1157 pvDst = (uint8_t *)pvDst + cbMaxRead;
1158 }
1159# else /* !IN_RING3 */
1160 RT_NOREF(pvDst, cbDst);
1161 if (pvDst || cbDst)
1162 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1163# endif /* !IN_RING3 */
1164}
1165
1166#else /* !IEM_WITH_CODE_TLB */
1167
/**
 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
 * exception if it fails.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   cbMin   The minimum number of bytes relative to offOpcode
 *                  that must be read.
 */
VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
{
    /*
     * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
     *
     * First translate CS:rIP to a physical address.
     */
    uint8_t const cbOpcode  = pVCpu->iem.s.cbOpcode;  /* Bytes already buffered in abOpcode. */
    uint8_t const offOpcode = pVCpu->iem.s.offOpcode; /* Current decode position in abOpcode. */
    uint8_t const cbLeft    = cbOpcode - offOpcode;   /* Unconsumed bytes still in the buffer. */
    Assert(cbLeft < cbMin);
    Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));

    /* Figure the linear address of the next opcode byte and how far we may
       read before running into the segment limit / non-canonical territory. */
    uint32_t cbToTryRead;
    RTGCPTR  GCPtrNext;
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
        if (!IEM_IS_CANONICAL(GCPtrNext))
            return iemRaiseGeneralProtectionFault0(pVCpu);
        /* Don't read past the end of the current guest page. */
        cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
    }
    else
    {
        uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
        /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
        GCPtrNext32 += cbOpcode;
        if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
            /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
            return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
        cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
        if (!cbToTryRead) /* overflowed */
        {
            /* Only possible when the address is 0 and the limit is max (flat 4 GiB). */
            Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
            cbToTryRead = UINT32_MAX;
            /** @todo check out wrapping around the code segment. */
        }
        if (cbToTryRead < cbMin - cbLeft)
            return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
        GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;

        uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
        if (cbToTryRead > cbLeftOnPage)
            cbToTryRead = cbLeftOnPage;
    }

    /* Restrict to opcode buffer space.

       We're making ASSUMPTIONS here based on work done previously in
       iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
       be fetched in case of an instruction crossing two pages. */
    if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
        cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
    if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
    { /* likely */ }
    else
    {
        Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    /* Walk the guest page tables to translate GCPtrNext and check permissions. */
    PGMPTWALK Walk;
    int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
    if (RT_FAILURE(rc))
    {
        Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        if (Walk.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
#endif
        return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
    }
    /* User-mode code must not fetch from supervisor pages. */
    if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
    {
        Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        if (Walk.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
#endif
        return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    }
    /* No-execute pages fault when EFER.NXE is set. */
    if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
    {
        Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        if (Walk.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
#endif
        return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    }
    RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
    Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n",  GCPtrNext,  GCPhys, cbOpcode));
    /** @todo Check reserved bits and such stuff. PGM is better at doing
     *        that, so do it when implementing the guest virtual address
     *        TLB... */

    /*
     * Read the bytes at this address.
     *
     * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
     * and since PATM should only patch the start of an instruction there
     * should be no need to check again here.
     */
    if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    {
        VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
                                            cbToTryRead, PGMACCESSORIGIN_IEM);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        {
            /* Partial success: record the informational status for pass-up. */
            Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
                 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
            rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
        }
        else
        {
            Log((RT_SUCCESS(rcStrict)
                 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
                 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
                 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
            return rcStrict;
        }
    }
    else
    {
        /* Bypass-handlers mode: read the raw physical memory directly. */
        rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
            return rc;
        }
    }
    pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
    Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));

    return VINF_SUCCESS;
}
1319
1320#endif /* !IEM_WITH_CODE_TLB */
1321#ifndef IEM_WITH_SETJMP
1322
1323/**
1324 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1325 *
1326 * @returns Strict VBox status code.
1327 * @param pVCpu The cross context virtual CPU structure of the
1328 * calling thread.
1329 * @param pb Where to return the opcode byte.
1330 */
1331VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1332{
1333 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1334 if (rcStrict == VINF_SUCCESS)
1335 {
1336 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1337 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1338 pVCpu->iem.s.offOpcode = offOpcode + 1;
1339 }
1340 else
1341 *pb = 0;
1342 return rcStrict;
1343}
1344
1345#else /* IEM_WITH_SETJMP */
1346
1347/**
1348 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1349 *
1350 * @returns The opcode byte.
1351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1352 */
1353uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1354{
1355# ifdef IEM_WITH_CODE_TLB
1356 uint8_t u8;
1357 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1358 return u8;
1359# else
1360 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1361 if (rcStrict == VINF_SUCCESS)
1362 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1363 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1364# endif
1365}
1366
1367#endif /* IEM_WITH_SETJMP */
1368
1369#ifndef IEM_WITH_SETJMP
1370
1371/**
1372 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1373 *
1374 * @returns Strict VBox status code.
1375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1376 * @param pu16 Where to return the opcode dword.
1377 */
1378VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1379{
1380 uint8_t u8;
1381 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1382 if (rcStrict == VINF_SUCCESS)
1383 *pu16 = (int8_t)u8;
1384 return rcStrict;
1385}
1386
1387
1388/**
1389 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1390 *
1391 * @returns Strict VBox status code.
1392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1393 * @param pu32 Where to return the opcode dword.
1394 */
1395VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1396{
1397 uint8_t u8;
1398 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1399 if (rcStrict == VINF_SUCCESS)
1400 *pu32 = (int8_t)u8;
1401 return rcStrict;
1402}
1403
1404
1405/**
1406 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1407 *
1408 * @returns Strict VBox status code.
1409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1410 * @param pu64 Where to return the opcode qword.
1411 */
1412VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1413{
1414 uint8_t u8;
1415 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1416 if (rcStrict == VINF_SUCCESS)
1417 *pu64 = (int8_t)u8;
1418 return rcStrict;
1419}
1420
1421#endif /* !IEM_WITH_SETJMP */
1422
1423
1424#ifndef IEM_WITH_SETJMP
1425
1426/**
1427 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1428 *
1429 * @returns Strict VBox status code.
1430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1431 * @param pu16 Where to return the opcode word.
1432 */
1433VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1434{
1435 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1436 if (rcStrict == VINF_SUCCESS)
1437 {
1438 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1439# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1440 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1441# else
1442 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1443# endif
1444 pVCpu->iem.s.offOpcode = offOpcode + 2;
1445 }
1446 else
1447 *pu16 = 0;
1448 return rcStrict;
1449}
1450
1451#else /* IEM_WITH_SETJMP */
1452
1453/**
1454 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
1455 *
1456 * @returns The opcode word.
1457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1458 */
1459uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1460{
1461# ifdef IEM_WITH_CODE_TLB
1462 uint16_t u16;
1463 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1464 return u16;
1465# else
1466 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1467 if (rcStrict == VINF_SUCCESS)
1468 {
1469 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1470 pVCpu->iem.s.offOpcode += 2;
1471# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1472 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1473# else
1474 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1475# endif
1476 }
1477 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1478# endif
1479}
1480
1481#endif /* IEM_WITH_SETJMP */
1482
1483#ifndef IEM_WITH_SETJMP
1484
1485/**
1486 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1487 *
1488 * @returns Strict VBox status code.
1489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1490 * @param pu32 Where to return the opcode double word.
1491 */
1492VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1493{
1494 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1495 if (rcStrict == VINF_SUCCESS)
1496 {
1497 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1498 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1499 pVCpu->iem.s.offOpcode = offOpcode + 2;
1500 }
1501 else
1502 *pu32 = 0;
1503 return rcStrict;
1504}
1505
1506
1507/**
1508 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1509 *
1510 * @returns Strict VBox status code.
1511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1512 * @param pu64 Where to return the opcode quad word.
1513 */
1514VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1515{
1516 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1517 if (rcStrict == VINF_SUCCESS)
1518 {
1519 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1520 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1521 pVCpu->iem.s.offOpcode = offOpcode + 2;
1522 }
1523 else
1524 *pu64 = 0;
1525 return rcStrict;
1526}
1527
1528#endif /* !IEM_WITH_SETJMP */
1529
1530#ifndef IEM_WITH_SETJMP
1531
1532/**
1533 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1534 *
1535 * @returns Strict VBox status code.
1536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1537 * @param pu32 Where to return the opcode dword.
1538 */
1539VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1540{
1541 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1542 if (rcStrict == VINF_SUCCESS)
1543 {
1544 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1545# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1546 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1547# else
1548 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1549 pVCpu->iem.s.abOpcode[offOpcode + 1],
1550 pVCpu->iem.s.abOpcode[offOpcode + 2],
1551 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1552# endif
1553 pVCpu->iem.s.offOpcode = offOpcode + 4;
1554 }
1555 else
1556 *pu32 = 0;
1557 return rcStrict;
1558}
1559
1560#else /* IEM_WITH_SETJMP */
1561
1562/**
1563 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1564 *
1565 * @returns The opcode dword.
1566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1567 */
1568uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1569{
1570# ifdef IEM_WITH_CODE_TLB
1571 uint32_t u32;
1572 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1573 return u32;
1574# else
1575 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1576 if (rcStrict == VINF_SUCCESS)
1577 {
1578 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1579 pVCpu->iem.s.offOpcode = offOpcode + 4;
1580# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1581 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1582# else
1583 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1584 pVCpu->iem.s.abOpcode[offOpcode + 1],
1585 pVCpu->iem.s.abOpcode[offOpcode + 2],
1586 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1587# endif
1588 }
1589 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1590# endif
1591}
1592
1593#endif /* IEM_WITH_SETJMP */
1594
1595#ifndef IEM_WITH_SETJMP
1596
1597/**
1598 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1599 *
1600 * @returns Strict VBox status code.
1601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1602 * @param pu64 Where to return the opcode dword.
1603 */
1604VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1605{
1606 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1607 if (rcStrict == VINF_SUCCESS)
1608 {
1609 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1610 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1611 pVCpu->iem.s.abOpcode[offOpcode + 1],
1612 pVCpu->iem.s.abOpcode[offOpcode + 2],
1613 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1614 pVCpu->iem.s.offOpcode = offOpcode + 4;
1615 }
1616 else
1617 *pu64 = 0;
1618 return rcStrict;
1619}
1620
1621
1622/**
1623 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1624 *
1625 * @returns Strict VBox status code.
1626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1627 * @param pu64 Where to return the opcode qword.
1628 */
1629VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1630{
1631 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1632 if (rcStrict == VINF_SUCCESS)
1633 {
1634 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1635 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1636 pVCpu->iem.s.abOpcode[offOpcode + 1],
1637 pVCpu->iem.s.abOpcode[offOpcode + 2],
1638 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1639 pVCpu->iem.s.offOpcode = offOpcode + 4;
1640 }
1641 else
1642 *pu64 = 0;
1643 return rcStrict;
1644}
1645
1646#endif /* !IEM_WITH_SETJMP */
1647
1648#ifndef IEM_WITH_SETJMP
1649
1650/**
1651 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1652 *
1653 * @returns Strict VBox status code.
1654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1655 * @param pu64 Where to return the opcode qword.
1656 */
1657VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1658{
1659 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1660 if (rcStrict == VINF_SUCCESS)
1661 {
1662 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1663# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1664 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1665# else
1666 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1667 pVCpu->iem.s.abOpcode[offOpcode + 1],
1668 pVCpu->iem.s.abOpcode[offOpcode + 2],
1669 pVCpu->iem.s.abOpcode[offOpcode + 3],
1670 pVCpu->iem.s.abOpcode[offOpcode + 4],
1671 pVCpu->iem.s.abOpcode[offOpcode + 5],
1672 pVCpu->iem.s.abOpcode[offOpcode + 6],
1673 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1674# endif
1675 pVCpu->iem.s.offOpcode = offOpcode + 8;
1676 }
1677 else
1678 *pu64 = 0;
1679 return rcStrict;
1680}
1681
1682#else /* IEM_WITH_SETJMP */
1683
1684/**
1685 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1686 *
1687 * @returns The opcode qword.
1688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1689 */
1690uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1691{
1692# ifdef IEM_WITH_CODE_TLB
1693 uint64_t u64;
1694 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1695 return u64;
1696# else
1697 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1698 if (rcStrict == VINF_SUCCESS)
1699 {
1700 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1701 pVCpu->iem.s.offOpcode = offOpcode + 8;
1702# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1703 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1704# else
1705 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1706 pVCpu->iem.s.abOpcode[offOpcode + 1],
1707 pVCpu->iem.s.abOpcode[offOpcode + 2],
1708 pVCpu->iem.s.abOpcode[offOpcode + 3],
1709 pVCpu->iem.s.abOpcode[offOpcode + 4],
1710 pVCpu->iem.s.abOpcode[offOpcode + 5],
1711 pVCpu->iem.s.abOpcode[offOpcode + 6],
1712 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1713# endif
1714 }
1715 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1716# endif
1717}
1718
1719#endif /* IEM_WITH_SETJMP */
1720
1721
1722
1723/** @name Misc Worker Functions.
1724 * @{
1725 */
1726
1727/**
1728 * Gets the exception class for the specified exception vector.
1729 *
1730 * @returns The class of the specified exception.
1731 * @param uVector The exception vector.
1732 */
1733static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1734{
1735 Assert(uVector <= X86_XCPT_LAST);
1736 switch (uVector)
1737 {
1738 case X86_XCPT_DE:
1739 case X86_XCPT_TS:
1740 case X86_XCPT_NP:
1741 case X86_XCPT_SS:
1742 case X86_XCPT_GP:
1743 case X86_XCPT_SX: /* AMD only */
1744 return IEMXCPTCLASS_CONTRIBUTORY;
1745
1746 case X86_XCPT_PF:
1747 case X86_XCPT_VE: /* Intel only */
1748 return IEMXCPTCLASS_PAGE_FAULT;
1749
1750 case X86_XCPT_DF:
1751 return IEMXCPTCLASS_DOUBLE_FAULT;
1752 }
1753 return IEMXCPTCLASS_BENIGN;
1754}
1755
1756
1757/**
1758 * Evaluates how to handle an exception caused during delivery of another event
1759 * (exception / interrupt).
1760 *
1761 * @returns How to handle the recursive exception.
1762 * @param pVCpu The cross context virtual CPU structure of the
1763 * calling thread.
1764 * @param fPrevFlags The flags of the previous event.
1765 * @param uPrevVector The vector of the previous event.
1766 * @param fCurFlags The flags of the current exception.
1767 * @param uCurVector The vector of the current exception.
1768 * @param pfXcptRaiseInfo Where to store additional information about the
1769 * exception condition. Optional.
1770 */
1771VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1772 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1773{
1774 /*
1775 * Only CPU exceptions can be raised while delivering other events, software interrupt
1776 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1777 */
1778 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1779 Assert(pVCpu); RT_NOREF(pVCpu);
1780 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1781
1782 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1783 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1784 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1785 {
1786 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1787 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1788 {
1789 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1790 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1791 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1792 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1793 {
1794 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1795 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1796 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1797 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1798 uCurVector, pVCpu->cpum.GstCtx.cr2));
1799 }
1800 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1801 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1802 {
1803 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1804 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1805 }
1806 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1807 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1808 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1809 {
1810 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1811 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1812 }
1813 }
1814 else
1815 {
1816 if (uPrevVector == X86_XCPT_NMI)
1817 {
1818 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1819 if (uCurVector == X86_XCPT_PF)
1820 {
1821 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1822 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1823 }
1824 }
1825 else if ( uPrevVector == X86_XCPT_AC
1826 && uCurVector == X86_XCPT_AC)
1827 {
1828 enmRaise = IEMXCPTRAISE_CPU_HANG;
1829 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1830 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1831 }
1832 }
1833 }
1834 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1835 {
1836 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1837 if (uCurVector == X86_XCPT_PF)
1838 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1839 }
1840 else
1841 {
1842 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1843 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1844 }
1845
1846 if (pfXcptRaiseInfo)
1847 *pfXcptRaiseInfo = fRaiseInfo;
1848 return enmRaise;
1849}
1850
1851
1852/**
1853 * Enters the CPU shutdown state initiated by a triple fault or other
1854 * unrecoverable conditions.
1855 *
1856 * @returns Strict VBox status code.
1857 * @param pVCpu The cross context virtual CPU structure of the
1858 * calling thread.
1859 */
1860static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1861{
1862 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1863 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1864
1865 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1866 {
1867 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1868 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1869 }
1870
1871 RT_NOREF(pVCpu);
1872 return VINF_EM_TRIPLE_FAULT;
1873}
1874
1875
1876/**
1877 * Validates a new SS segment.
1878 *
1879 * @returns VBox strict status code.
1880 * @param pVCpu The cross context virtual CPU structure of the
1881 * calling thread.
1882 * @param NewSS The new SS selctor.
1883 * @param uCpl The CPL to load the stack for.
1884 * @param pDesc Where to return the descriptor.
1885 */
1886static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1887{
1888 /* Null selectors are not allowed (we're not called for dispatching
1889 interrupts with SS=0 in long mode). */
1890 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1891 {
1892 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1893 return iemRaiseTaskSwitchFault0(pVCpu);
1894 }
1895
1896 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1897 if ((NewSS & X86_SEL_RPL) != uCpl)
1898 {
1899 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1900 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1901 }
1902
1903 /*
1904 * Read the descriptor.
1905 */
1906 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1907 if (rcStrict != VINF_SUCCESS)
1908 return rcStrict;
1909
1910 /*
1911 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1912 */
1913 if (!pDesc->Legacy.Gen.u1DescType)
1914 {
1915 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1916 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1917 }
1918
1919 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1920 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1921 {
1922 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1923 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1924 }
1925 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1926 {
1927 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1928 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1929 }
1930
1931 /* Is it there? */
1932 /** @todo testcase: Is this checked before the canonical / limit check below? */
1933 if (!pDesc->Legacy.Gen.u1Present)
1934 {
1935 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1936 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1937 }
1938
1939 return VINF_SUCCESS;
1940}
1941
1942/** @} */
1943
1944
1945/** @name Raising Exceptions.
1946 *
1947 * @{
1948 */
1949
1950
1951/**
1952 * Loads the specified stack far pointer from the TSS.
1953 *
1954 * @returns VBox strict status code.
1955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1956 * @param uCpl The CPL to load the stack for.
1957 * @param pSelSS Where to return the new stack segment.
1958 * @param puEsp Where to return the new stack pointer.
1959 */
1960static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1961{
1962 VBOXSTRICTRC rcStrict;
1963 Assert(uCpl < 4);
1964
1965 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1966 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1967 {
1968 /*
1969 * 16-bit TSS (X86TSS16).
1970 */
1971 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1972 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1973 {
1974 uint32_t off = uCpl * 4 + 2;
1975 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1976 {
1977 /** @todo check actual access pattern here. */
1978 uint32_t u32Tmp = 0; /* gcc maybe... */
1979 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1980 if (rcStrict == VINF_SUCCESS)
1981 {
1982 *puEsp = RT_LOWORD(u32Tmp);
1983 *pSelSS = RT_HIWORD(u32Tmp);
1984 return VINF_SUCCESS;
1985 }
1986 }
1987 else
1988 {
1989 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1990 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1991 }
1992 break;
1993 }
1994
1995 /*
1996 * 32-bit TSS (X86TSS32).
1997 */
1998 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1999 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2000 {
2001 uint32_t off = uCpl * 8 + 4;
2002 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
2003 {
2004/** @todo check actual access pattern here. */
2005 uint64_t u64Tmp;
2006 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2007 if (rcStrict == VINF_SUCCESS)
2008 {
2009 *puEsp = u64Tmp & UINT32_MAX;
2010 *pSelSS = (RTSEL)(u64Tmp >> 32);
2011 return VINF_SUCCESS;
2012 }
2013 }
2014 else
2015 {
2016 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
2017 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2018 }
2019 break;
2020 }
2021
2022 default:
2023 AssertFailed();
2024 rcStrict = VERR_IEM_IPE_4;
2025 break;
2026 }
2027
2028 *puEsp = 0; /* make gcc happy */
2029 *pSelSS = 0; /* make gcc happy */
2030 return rcStrict;
2031}
2032
2033
2034/**
2035 * Loads the specified stack pointer from the 64-bit TSS.
2036 *
2037 * @returns VBox strict status code.
2038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2039 * @param uCpl The CPL to load the stack for.
2040 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2041 * @param puRsp Where to return the new stack pointer.
2042 */
2043static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2044{
2045 Assert(uCpl < 4);
2046 Assert(uIst < 8);
2047 *puRsp = 0; /* make gcc happy */
2048
2049 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2050 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2051
2052 uint32_t off;
2053 if (uIst)
2054 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2055 else
2056 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2057 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2058 {
2059 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2060 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2061 }
2062
2063 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2064}
2065
2066
2067/**
2068 * Adjust the CPU state according to the exception being raised.
2069 *
2070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2071 * @param u8Vector The exception that has been raised.
2072 */
2073DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2074{
2075 switch (u8Vector)
2076 {
2077 case X86_XCPT_DB:
2078 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2079 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2080 break;
2081 /** @todo Read the AMD and Intel exception reference... */
2082 }
2083}
2084
2085
2086/**
2087 * Implements exceptions and interrupts for real mode.
2088 *
2089 * @returns VBox strict status code.
2090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2091 * @param cbInstr The number of bytes to offset rIP by in the return
2092 * address.
2093 * @param u8Vector The interrupt / exception vector number.
2094 * @param fFlags The flags.
2095 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2096 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2097 */
2098static VBOXSTRICTRC
2099iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2100 uint8_t cbInstr,
2101 uint8_t u8Vector,
2102 uint32_t fFlags,
2103 uint16_t uErr,
2104 uint64_t uCr2) RT_NOEXCEPT
2105{
2106 NOREF(uErr); NOREF(uCr2);
2107 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2108
2109 /*
2110 * Read the IDT entry.
2111 */
2112 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2113 {
2114 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2115 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2116 }
2117 RTFAR16 Idte;
2118 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2119 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2120 {
2121 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2122 return rcStrict;
2123 }
2124
2125#ifdef LOG_ENABLED
2126 /* If software interrupt, try decode it if logging is enabled and such. */
2127 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2128 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
2129 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
2130#endif
2131
2132 /*
2133 * Push the stack frame.
2134 */
2135 uint8_t bUnmapInfo;
2136 uint16_t *pu16Frame;
2137 uint64_t uNewRsp;
2138 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
2139 if (rcStrict != VINF_SUCCESS)
2140 return rcStrict;
2141
2142 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2143#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2144 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2145 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2146 fEfl |= UINT16_C(0xf000);
2147#endif
2148 pu16Frame[2] = (uint16_t)fEfl;
2149 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2150 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2151 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2152 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2153 return rcStrict;
2154
2155 /*
2156 * Load the vector address into cs:ip and make exception specific state
2157 * adjustments.
2158 */
2159 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2160 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2161 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2162 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2163 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2164 pVCpu->cpum.GstCtx.rip = Idte.off;
2165 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2166 IEMMISC_SET_EFL(pVCpu, fEfl);
2167
2168 /** @todo do we actually do this in real mode? */
2169 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2170 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2171
2172 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK doesn't really change here,
2173 so best leave them alone in case we're in a weird kind of real mode... */
2174
2175 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2176}
2177
2178
2179/**
2180 * Loads a NULL data selector into when coming from V8086 mode.
2181 *
2182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2183 * @param pSReg Pointer to the segment register.
2184 */
2185DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2186{
2187 pSReg->Sel = 0;
2188 pSReg->ValidSel = 0;
2189 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2190 {
2191 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
2192 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2193 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2194 }
2195 else
2196 {
2197 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2198 /** @todo check this on AMD-V */
2199 pSReg->u64Base = 0;
2200 pSReg->u32Limit = 0;
2201 }
2202}
2203
2204
2205/**
2206 * Loads a segment selector during a task switch in V8086 mode.
2207 *
2208 * @param pSReg Pointer to the segment register.
2209 * @param uSel The selector value to load.
2210 */
2211DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2212{
2213 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2214 pSReg->Sel = uSel;
2215 pSReg->ValidSel = uSel;
2216 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2217 pSReg->u64Base = uSel << 4;
2218 pSReg->u32Limit = 0xffff;
2219 pSReg->Attr.u = 0xf3;
2220}
2221
2222
2223/**
2224 * Loads a segment selector during a task switch in protected mode.
2225 *
2226 * In this task switch scenario, we would throw \#TS exceptions rather than
2227 * \#GPs.
2228 *
2229 * @returns VBox strict status code.
2230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2231 * @param pSReg Pointer to the segment register.
2232 * @param uSel The new selector value.
2233 *
2234 * @remarks This does _not_ handle CS or SS.
2235 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2236 */
2237static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2238{
2239 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2240
2241 /* Null data selector. */
2242 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2243 {
2244 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2245 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2246 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2247 return VINF_SUCCESS;
2248 }
2249
2250 /* Fetch the descriptor. */
2251 IEMSELDESC Desc;
2252 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2253 if (rcStrict != VINF_SUCCESS)
2254 {
2255 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2256 VBOXSTRICTRC_VAL(rcStrict)));
2257 return rcStrict;
2258 }
2259
2260 /* Must be a data segment or readable code segment. */
2261 if ( !Desc.Legacy.Gen.u1DescType
2262 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2263 {
2264 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2265 Desc.Legacy.Gen.u4Type));
2266 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2267 }
2268
2269 /* Check privileges for data segments and non-conforming code segments. */
2270 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2271 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2272 {
2273 /* The RPL and the new CPL must be less than or equal to the DPL. */
2274 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2275 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2276 {
2277 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2278 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2279 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2280 }
2281 }
2282
2283 /* Is it there? */
2284 if (!Desc.Legacy.Gen.u1Present)
2285 {
2286 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2287 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2288 }
2289
2290 /* The base and limit. */
2291 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2292 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2293
2294 /*
2295 * Ok, everything checked out fine. Now set the accessed bit before
2296 * committing the result into the registers.
2297 */
2298 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2299 {
2300 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2301 if (rcStrict != VINF_SUCCESS)
2302 return rcStrict;
2303 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2304 }
2305
2306 /* Commit */
2307 pSReg->Sel = uSel;
2308 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2309 pSReg->u32Limit = cbLimit;
2310 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2311 pSReg->ValidSel = uSel;
2312 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2313 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2314 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2315
2316 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2317 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2318 return VINF_SUCCESS;
2319}
2320
2321
2322/**
2323 * Performs a task switch.
2324 *
2325 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2326 * caller is responsible for performing the necessary checks (like DPL, TSS
2327 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2328 * reference for JMP, CALL, IRET.
2329 *
2330 * If the task switch is the due to a software interrupt or hardware exception,
2331 * the caller is responsible for validating the TSS selector and descriptor. See
2332 * Intel Instruction reference for INT n.
2333 *
2334 * @returns VBox strict status code.
2335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2336 * @param enmTaskSwitch The cause of the task switch.
2337 * @param uNextEip The EIP effective after the task switch.
2338 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2339 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2340 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2341 * @param SelTss The TSS selector of the new task.
2342 * @param pNewDescTss Pointer to the new TSS descriptor.
2343 */
2344VBOXSTRICTRC
2345iemTaskSwitch(PVMCPUCC pVCpu,
2346 IEMTASKSWITCH enmTaskSwitch,
2347 uint32_t uNextEip,
2348 uint32_t fFlags,
2349 uint16_t uErr,
2350 uint64_t uCr2,
2351 RTSEL SelTss,
2352 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
2353{
2354 Assert(!IEM_IS_REAL_MODE(pVCpu));
2355 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2356 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2357
2358 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
2359 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2360 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2361 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2362 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2363
2364 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2365 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2366
2367 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
2368 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
2369
2370 /* Update CR2 in case it's a page-fault. */
2371 /** @todo This should probably be done much earlier in IEM/PGM. See
2372 * @bugref{5653#c49}. */
2373 if (fFlags & IEM_XCPT_FLAGS_CR2)
2374 pVCpu->cpum.GstCtx.cr2 = uCr2;
2375
2376 /*
2377 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2378 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2379 */
2380 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
2381 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2382 if (uNewTssLimit < uNewTssLimitMin)
2383 {
2384 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
2385 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
2386 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2387 }
2388
2389 /*
2390 * Task switches in VMX non-root mode always cause task switches.
2391 * The new TSS must have been read and validated (DPL, limits etc.) before a
2392 * task-switch VM-exit commences.
2393 *
2394 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2395 */
2396 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2397 {
2398 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
2399 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
2400 }
2401
2402 /*
2403 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2404 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2405 */
2406 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2407 {
2408 uint64_t const uExitInfo1 = SelTss;
2409 uint64_t uExitInfo2 = uErr;
2410 switch (enmTaskSwitch)
2411 {
2412 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2413 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2414 default: break;
2415 }
2416 if (fFlags & IEM_XCPT_FLAGS_ERR)
2417 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2418 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2419 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2420
2421 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2422 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2423 RT_NOREF2(uExitInfo1, uExitInfo2);
2424 }
2425
2426 /*
2427 * Check the current TSS limit. The last written byte to the current TSS during the
2428 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2429 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2430 *
2431 * The AMD docs doesn't mention anything about limit checks with LTR which suggests you can
2432 * end up with smaller than "legal" TSS limits.
2433 */
2434 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2435 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
2436 if (uCurTssLimit < uCurTssLimitMin)
2437 {
2438 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
2439 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
2440 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
2441 }
2442
2443 /*
2444 * Verify that the new TSS can be accessed and map it. Map only the required contents
2445 * and not the entire TSS.
2446 */
2447 uint8_t bUnmapInfoNewTss;
2448 void *pvNewTss;
2449 uint32_t const cbNewTss = uNewTssLimitMin + 1;
2450 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
2451 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2452 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2453 * not perform correct translation if this happens. See Intel spec. 7.2.1
2454 * "Task-State Segment". */
2455 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
2456/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
2457 * Consider wrapping the remainder into a function for simpler cleanup. */
2458 if (rcStrict != VINF_SUCCESS)
2459 {
2460 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
2461 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
2462 return rcStrict;
2463 }
2464
2465 /*
2466 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2467 */
2468 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2469 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2470 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2471 {
2472 uint8_t bUnmapInfoDescCurTss;
2473 PX86DESC pDescCurTss;
2474 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
2475 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2476 if (rcStrict != VINF_SUCCESS)
2477 {
2478 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2479 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2480 return rcStrict;
2481 }
2482
2483 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2484 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
2485 if (rcStrict != VINF_SUCCESS)
2486 {
2487 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2488 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2489 return rcStrict;
2490 }
2491
2492 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2493 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2494 {
2495 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2496 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2497 fEFlags &= ~X86_EFL_NT;
2498 }
2499 }
2500
2501 /*
2502 * Save the CPU state into the current TSS.
2503 */
2504 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
2505 if (GCPtrNewTss == GCPtrCurTss)
2506 {
2507 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
2508 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2509 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2510 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2511 pVCpu->cpum.GstCtx.ldtr.Sel));
2512 }
2513 if (fIsNewTss386)
2514 {
2515 /*
2516 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2517 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2518 */
2519 uint8_t bUnmapInfoCurTss32;
2520 void *pvCurTss32;
2521 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
2522 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2523 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2524 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
2525 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2526 if (rcStrict != VINF_SUCCESS)
2527 {
2528 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2529 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2530 return rcStrict;
2531 }
2532
2533 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */
2534 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
2535 pCurTss32->eip = uNextEip;
2536 pCurTss32->eflags = fEFlags;
2537 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
2538 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
2539 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
2540 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
2541 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
2542 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
2543 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
2544 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
2545 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
2546 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2547 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2548 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2549 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2550 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2551
2552 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
2553 if (rcStrict != VINF_SUCCESS)
2554 {
2555 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2556 VBOXSTRICTRC_VAL(rcStrict)));
2557 return rcStrict;
2558 }
2559 }
2560 else
2561 {
2562 /*
2563 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2564 */
2565 uint8_t bUnmapInfoCurTss16;
2566 void *pvCurTss16;
2567 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
2568 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2569 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2570 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
2571 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
2572 if (rcStrict != VINF_SUCCESS)
2573 {
2574 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
2575 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
2576 return rcStrict;
2577 }
2578
2579 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTss..cbCurTss). */
2580 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
2581 pCurTss16->ip = uNextEip;
2582 pCurTss16->flags = (uint16_t)fEFlags;
2583 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
2584 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
2585 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
2586 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
2587 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
2588 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
2589 pCurTss16->si = pVCpu->cpum.GstCtx.si;
2590 pCurTss16->di = pVCpu->cpum.GstCtx.di;
2591 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
2592 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2593 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2594 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2595
2596 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
2597 if (rcStrict != VINF_SUCCESS)
2598 {
2599 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2600 VBOXSTRICTRC_VAL(rcStrict)));
2601 return rcStrict;
2602 }
2603 }
2604
2605 /*
2606 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2607 */
2608 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2609 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2610 {
2611 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2612 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
2613 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2614 }
2615
2616 /*
2617 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2618 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2619 */
2620 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2621 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2622 bool fNewDebugTrap;
2623 if (fIsNewTss386)
2624 {
2625 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
2626 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
2627 uNewEip = pNewTss32->eip;
2628 uNewEflags = pNewTss32->eflags;
2629 uNewEax = pNewTss32->eax;
2630 uNewEcx = pNewTss32->ecx;
2631 uNewEdx = pNewTss32->edx;
2632 uNewEbx = pNewTss32->ebx;
2633 uNewEsp = pNewTss32->esp;
2634 uNewEbp = pNewTss32->ebp;
2635 uNewEsi = pNewTss32->esi;
2636 uNewEdi = pNewTss32->edi;
2637 uNewES = pNewTss32->es;
2638 uNewCS = pNewTss32->cs;
2639 uNewSS = pNewTss32->ss;
2640 uNewDS = pNewTss32->ds;
2641 uNewFS = pNewTss32->fs;
2642 uNewGS = pNewTss32->gs;
2643 uNewLdt = pNewTss32->selLdt;
2644 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
2645 }
2646 else
2647 {
2648 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
2649 uNewCr3 = 0;
2650 uNewEip = pNewTss16->ip;
2651 uNewEflags = pNewTss16->flags;
2652 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
2653 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
2654 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
2655 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
2656 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
2657 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
2658 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
2659 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
2660 uNewES = pNewTss16->es;
2661 uNewCS = pNewTss16->cs;
2662 uNewSS = pNewTss16->ss;
2663 uNewDS = pNewTss16->ds;
2664 uNewFS = 0;
2665 uNewGS = 0;
2666 uNewLdt = pNewTss16->selLdt;
2667 fNewDebugTrap = false;
2668 }
2669
2670 if (GCPtrNewTss == GCPtrCurTss)
2671 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2672 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2673
2674 /*
2675 * We're done accessing the new TSS.
2676 */
2677 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2678 if (rcStrict != VINF_SUCCESS)
2679 {
2680 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2681 return rcStrict;
2682 }
2683
2684 /*
2685 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2686 */
2687 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2688 {
2689 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
2690 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2691 if (rcStrict != VINF_SUCCESS)
2692 {
2693 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2694 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2695 return rcStrict;
2696 }
2697
2698 /* Check that the descriptor indicates the new TSS is available (not busy). */
2699 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2700 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2701 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
2702
2703 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2704 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
2705 if (rcStrict != VINF_SUCCESS)
2706 {
2707 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2708 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2709 return rcStrict;
2710 }
2711 }
2712
2713 /*
2714 * From this point on, we're technically in the new task. We will defer exceptions
2715 * until the completion of the task switch but before executing any instructions in the new task.
2716 */
2717 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
2718 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
2719 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2720 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
2721 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
2722 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
2723 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2724
2725 /* Set the busy bit in TR. */
2726 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2727
2728 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2729 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2730 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2731 {
2732 uNewEflags |= X86_EFL_NT;
2733 }
2734
2735 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2736 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2737 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2738
2739 pVCpu->cpum.GstCtx.eip = uNewEip;
2740 pVCpu->cpum.GstCtx.eax = uNewEax;
2741 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2742 pVCpu->cpum.GstCtx.edx = uNewEdx;
2743 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2744 pVCpu->cpum.GstCtx.esp = uNewEsp;
2745 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2746 pVCpu->cpum.GstCtx.esi = uNewEsi;
2747 pVCpu->cpum.GstCtx.edi = uNewEdi;
2748
2749 uNewEflags &= X86_EFL_LIVE_MASK;
2750 uNewEflags |= X86_EFL_RA1_MASK;
2751 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2752
2753 /*
2754 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2755 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2756 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2757 */
2758 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2759 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2760
2761 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2762 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2763
2764 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2765 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2766
2767 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2768 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2769
2770 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2771 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2772
2773 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2774 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2775 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2776
2777 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2778 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2779 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2780 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2781
2782 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2783 {
2784 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2785 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2786 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2787 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2788 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2789 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2790 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2791 }
2792
2793 /*
2794 * Switch CR3 for the new task.
2795 */
2796 if ( fIsNewTss386
2797 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2798 {
2799 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2800 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2801 AssertRCSuccessReturn(rc, rc);
2802
2803 /* Inform PGM. */
2804 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2805 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2806 AssertRCReturn(rc, rc);
2807 /* ignore informational status codes */
2808
2809 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2810 }
2811
2812 /*
2813 * Switch LDTR for the new task.
2814 */
2815 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2816 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2817 else
2818 {
2819 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2820
2821 IEMSELDESC DescNewLdt;
2822 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2823 if (rcStrict != VINF_SUCCESS)
2824 {
2825 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2826 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2827 return rcStrict;
2828 }
2829 if ( !DescNewLdt.Legacy.Gen.u1Present
2830 || DescNewLdt.Legacy.Gen.u1DescType
2831 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2832 {
2833 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2834 uNewLdt, DescNewLdt.Legacy.u));
2835 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2836 }
2837
2838 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2839 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2840 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2841 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2842 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2843 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2844 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2846 }
2847
2848 IEMSELDESC DescSS;
2849 if (IEM_IS_V86_MODE(pVCpu))
2850 {
2851 IEM_SET_CPL(pVCpu, 3);
2852 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2853 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2854 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2855 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2856 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2857 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2858
2859 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2860 DescSS.Legacy.u = 0;
2861 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2862 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2863 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2864 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2865 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2866 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2867 DescSS.Legacy.Gen.u2Dpl = 3;
2868 }
2869 else
2870 {
2871 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2872
2873 /*
2874 * Load the stack segment for the new task.
2875 */
2876 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2877 {
2878 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2879 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2880 }
2881
2882 /* Fetch the descriptor. */
2883 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2884 if (rcStrict != VINF_SUCCESS)
2885 {
2886 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2887 VBOXSTRICTRC_VAL(rcStrict)));
2888 return rcStrict;
2889 }
2890
2891 /* SS must be a data segment and writable. */
2892 if ( !DescSS.Legacy.Gen.u1DescType
2893 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2894 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2895 {
2896 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2897 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2898 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2899 }
2900
2901 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2902 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2903 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2904 {
2905 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2906 uNewCpl));
2907 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2908 }
2909
2910 /* Is it there? */
2911 if (!DescSS.Legacy.Gen.u1Present)
2912 {
2913 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2914 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2915 }
2916
2917 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2918 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2919
2920 /* Set the accessed bit before committing the result into SS. */
2921 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2922 {
2923 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2924 if (rcStrict != VINF_SUCCESS)
2925 return rcStrict;
2926 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2927 }
2928
2929 /* Commit SS. */
2930 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2931 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2932 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2933 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2934 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2935 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2936 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2937
2938 /* CPL has changed, update IEM before loading rest of segments. */
2939 IEM_SET_CPL(pVCpu, uNewCpl);
2940
2941 /*
2942 * Load the data segments for the new task.
2943 */
2944 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2945 if (rcStrict != VINF_SUCCESS)
2946 return rcStrict;
2947 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2948 if (rcStrict != VINF_SUCCESS)
2949 return rcStrict;
2950 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2951 if (rcStrict != VINF_SUCCESS)
2952 return rcStrict;
2953 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2954 if (rcStrict != VINF_SUCCESS)
2955 return rcStrict;
2956
2957 /*
2958 * Load the code segment for the new task.
2959 */
2960 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2961 {
2962 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2963 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2964 }
2965
2966 /* Fetch the descriptor. */
2967 IEMSELDESC DescCS;
2968 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2969 if (rcStrict != VINF_SUCCESS)
2970 {
2971 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2972 return rcStrict;
2973 }
2974
2975 /* CS must be a code segment. */
2976 if ( !DescCS.Legacy.Gen.u1DescType
2977 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2978 {
2979 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2980 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2981 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2982 }
2983
2984 /* For conforming CS, DPL must be less than or equal to the RPL. */
2985 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2986 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2987 {
2988 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2989 DescCS.Legacy.Gen.u2Dpl));
2990 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2991 }
2992
2993 /* For non-conforming CS, DPL must match RPL. */
2994 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2995 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2996 {
2997 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2998 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2999 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3000 }
3001
3002 /* Is it there? */
3003 if (!DescCS.Legacy.Gen.u1Present)
3004 {
3005 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
3006 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
3007 }
3008
3009 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
3010 u64Base = X86DESC_BASE(&DescCS.Legacy);
3011
3012 /* Set the accessed bit before committing the result into CS. */
3013 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3014 {
3015 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
3016 if (rcStrict != VINF_SUCCESS)
3017 return rcStrict;
3018 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3019 }
3020
3021 /* Commit CS. */
3022 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
3023 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
3024 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3025 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
3026 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
3027 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
3029 }
3030
3031 /* Make sure the CPU mode is correct. */
3032 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3033 if (fExecNew != pVCpu->iem.s.fExec)
3034 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3035 pVCpu->iem.s.fExec = fExecNew;
3036
3037 /** @todo Debug trap. */
3038 if (fIsNewTss386 && fNewDebugTrap)
3039 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
3040
3041 /*
3042 * Construct the error code masks based on what caused this task switch.
3043 * See Intel Instruction reference for INT.
3044 */
3045 uint16_t uExt;
3046 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3047 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3048 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3049 uExt = 1;
3050 else
3051 uExt = 0;
3052
3053 /*
3054 * Push any error code on to the new stack.
3055 */
3056 if (fFlags & IEM_XCPT_FLAGS_ERR)
3057 {
3058 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3059 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3060 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
3061
3062 /* Check that there is sufficient space on the stack. */
3063 /** @todo Factor out segment limit checking for normal/expand down segments
3064 * into a separate function. */
3065 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3066 {
3067 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3068 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3069 {
3070 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3071 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3072 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3073 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3074 }
3075 }
3076 else
3077 {
3078 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3079 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3080 {
3081 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3082 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3083 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3084 }
3085 }
3086
3087
3088 if (fIsNewTss386)
3089 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3090 else
3091 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3092 if (rcStrict != VINF_SUCCESS)
3093 {
3094 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3095 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3096 return rcStrict;
3097 }
3098 }
3099
3100 /* Check the new EIP against the new CS limit. */
3101 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3102 {
3103 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3104 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3105 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3106 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3107 }
3108
3109 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3110 pVCpu->cpum.GstCtx.ss.Sel));
3111 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3112}
3113
3114
3115/**
3116 * Implements exceptions and interrupts for protected mode.
3117 *
3118 * @returns VBox strict status code.
3119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3120 * @param cbInstr The number of bytes to offset rIP by in the return
3121 * address.
3122 * @param u8Vector The interrupt / exception vector number.
3123 * @param fFlags The flags.
3124 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3125 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3126 */
3127static VBOXSTRICTRC
3128iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3129 uint8_t cbInstr,
3130 uint8_t u8Vector,
3131 uint32_t fFlags,
3132 uint16_t uErr,
3133 uint64_t uCr2) RT_NOEXCEPT
3134{
3135 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3136
3137 /*
3138 * Read the IDT entry.
3139 */
3140 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3141 {
3142 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3143 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3144 }
3145 X86DESC Idte;
3146 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3147 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3148 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3149 {
3150 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3151 return rcStrict;
3152 }
3153 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3154 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3155 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3156 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3157
3158 /*
3159 * Check the descriptor type, DPL and such.
3160 * ASSUMES this is done in the same order as described for call-gate calls.
3161 */
3162 if (Idte.Gate.u1DescType)
3163 {
3164 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3165 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3166 }
3167 bool fTaskGate = false;
3168 uint8_t f32BitGate = true;
3169 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3170 switch (Idte.Gate.u4Type)
3171 {
3172 case X86_SEL_TYPE_SYS_UNDEFINED:
3173 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3174 case X86_SEL_TYPE_SYS_LDT:
3175 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3176 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3177 case X86_SEL_TYPE_SYS_UNDEFINED2:
3178 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3179 case X86_SEL_TYPE_SYS_UNDEFINED3:
3180 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3181 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3182 case X86_SEL_TYPE_SYS_UNDEFINED4:
3183 {
3184 /** @todo check what actually happens when the type is wrong...
3185 * esp. call gates. */
3186 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3187 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3188 }
3189
3190 case X86_SEL_TYPE_SYS_286_INT_GATE:
3191 f32BitGate = false;
3192 RT_FALL_THRU();
3193 case X86_SEL_TYPE_SYS_386_INT_GATE:
3194 fEflToClear |= X86_EFL_IF;
3195 break;
3196
3197 case X86_SEL_TYPE_SYS_TASK_GATE:
3198 fTaskGate = true;
3199#ifndef IEM_IMPLEMENTS_TASKSWITCH
3200 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3201#endif
3202 break;
3203
3204 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3205 f32BitGate = false;
3206 break;
3207 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3208 break;
3209
3210 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3211 }
3212
3213 /* Check DPL against CPL if applicable. */
3214 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3215 {
3216 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3217 {
3218 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3219 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3220 }
3221 }
3222
3223 /* Is it there? */
3224 if (!Idte.Gate.u1Present)
3225 {
3226 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3227 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3228 }
3229
3230 /* Is it a task-gate? */
3231 if (fTaskGate)
3232 {
3233 /*
3234 * Construct the error code masks based on what caused this task switch.
3235 * See Intel Instruction reference for INT.
3236 */
3237 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3238 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3239 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3240 RTSEL SelTss = Idte.Gate.u16Sel;
3241
3242 /*
3243 * Fetch the TSS descriptor in the GDT.
3244 */
3245 IEMSELDESC DescTSS;
3246 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
3247 if (rcStrict != VINF_SUCCESS)
3248 {
3249 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
3250 VBOXSTRICTRC_VAL(rcStrict)));
3251 return rcStrict;
3252 }
3253
3254 /* The TSS descriptor must be a system segment and be available (not busy). */
3255 if ( DescTSS.Legacy.Gen.u1DescType
3256 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3257 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3258 {
3259 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3260 u8Vector, SelTss, DescTSS.Legacy.au64));
3261 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
3262 }
3263
3264 /* The TSS must be present. */
3265 if (!DescTSS.Legacy.Gen.u1Present)
3266 {
3267 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
3268 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
3269 }
3270
3271 /* Do the actual task switch. */
3272 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3273 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3274 fFlags, uErr, uCr2, SelTss, &DescTSS);
3275 }
3276
3277 /* A null CS is bad. */
3278 RTSEL NewCS = Idte.Gate.u16Sel;
3279 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3280 {
3281 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3282 return iemRaiseGeneralProtectionFault0(pVCpu);
3283 }
3284
3285 /* Fetch the descriptor for the new CS. */
3286 IEMSELDESC DescCS;
3287 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3288 if (rcStrict != VINF_SUCCESS)
3289 {
3290 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3291 return rcStrict;
3292 }
3293
3294 /* Must be a code segment. */
3295 if (!DescCS.Legacy.Gen.u1DescType)
3296 {
3297 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3298 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3299 }
3300 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3301 {
3302 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3303 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3304 }
3305
3306 /* Don't allow lowering the privilege level. */
3307 /** @todo Does the lowering of privileges apply to software interrupts
3308 * only? This has bearings on the more-privileged or
3309 * same-privilege stack behavior further down. A testcase would
3310 * be nice. */
3311 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3312 {
3313 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3314 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3315 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3316 }
3317
3318 /* Make sure the selector is present. */
3319 if (!DescCS.Legacy.Gen.u1Present)
3320 {
3321 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3322 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3323 }
3324
3325#ifdef LOG_ENABLED
3326 /* If software interrupt, try decode it if logging is enabled and such. */
3327 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3328 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3329 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3330#endif
3331
3332 /* Check the new EIP against the new CS limit. */
3333 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3334 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3335 ? Idte.Gate.u16OffsetLow
3336 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3337 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3338 if (uNewEip > cbLimitCS)
3339 {
3340 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3341 u8Vector, uNewEip, cbLimitCS, NewCS));
3342 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3343 }
3344 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3345
3346 /* Calc the flag image to push. */
3347 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3348 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3349 fEfl &= ~X86_EFL_RF;
3350 else
3351 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3352
3353 /* From V8086 mode only go to CPL 0. */
3354 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3355 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3356 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3357 {
3358 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3359 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3360 }
3361
3362 /*
3363 * If the privilege level changes, we need to get a new stack from the TSS.
3364 * This in turns means validating the new SS and ESP...
3365 */
3366 if (uNewCpl != IEM_GET_CPL(pVCpu))
3367 {
3368 RTSEL NewSS;
3369 uint32_t uNewEsp;
3370 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3371 if (rcStrict != VINF_SUCCESS)
3372 return rcStrict;
3373
3374 IEMSELDESC DescSS;
3375 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3376 if (rcStrict != VINF_SUCCESS)
3377 return rcStrict;
3378 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3379 if (!DescSS.Legacy.Gen.u1DefBig)
3380 {
3381 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3382 uNewEsp = (uint16_t)uNewEsp;
3383 }
3384
3385 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3386
3387 /* Check that there is sufficient space for the stack frame. */
3388 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3389 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3390 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3391 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3392
3393 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3394 {
3395 if ( uNewEsp - 1 > cbLimitSS
3396 || uNewEsp < cbStackFrame)
3397 {
3398 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3399 u8Vector, NewSS, uNewEsp, cbStackFrame));
3400 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3401 }
3402 }
3403 else
3404 {
3405 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3406 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3407 {
3408 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3409 u8Vector, NewSS, uNewEsp, cbStackFrame));
3410 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3411 }
3412 }
3413
3414 /*
3415 * Start making changes.
3416 */
3417
3418 /* Set the new CPL so that stack accesses use it. */
3419 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3420 IEM_SET_CPL(pVCpu, uNewCpl);
3421
3422 /* Create the stack frame. */
3423 uint8_t bUnmapInfoStackFrame;
3424 RTPTRUNION uStackFrame;
3425 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3426 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3427 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3428 if (rcStrict != VINF_SUCCESS)
3429 return rcStrict;
3430 if (f32BitGate)
3431 {
3432 if (fFlags & IEM_XCPT_FLAGS_ERR)
3433 *uStackFrame.pu32++ = uErr;
3434 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3435 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3436 uStackFrame.pu32[2] = fEfl;
3437 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3438 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3439 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3440 if (fEfl & X86_EFL_VM)
3441 {
3442 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3443 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3444 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3445 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3446 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3447 }
3448 }
3449 else
3450 {
3451 if (fFlags & IEM_XCPT_FLAGS_ERR)
3452 *uStackFrame.pu16++ = uErr;
3453 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3454 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3455 uStackFrame.pu16[2] = fEfl;
3456 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3457 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3458 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3459 if (fEfl & X86_EFL_VM)
3460 {
3461 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3462 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3463 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3464 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3465 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3466 }
3467 }
3468 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3469 if (rcStrict != VINF_SUCCESS)
3470 return rcStrict;
3471
3472 /* Mark the selectors 'accessed' (hope this is the correct time). */
 3473 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3474 * after pushing the stack frame? (Write protect the gdt + stack to
3475 * find out.) */
3476 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3477 {
3478 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3479 if (rcStrict != VINF_SUCCESS)
3480 return rcStrict;
3481 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3482 }
3483
3484 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3485 {
3486 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3487 if (rcStrict != VINF_SUCCESS)
3488 return rcStrict;
3489 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3490 }
3491
3492 /*
 3493 * Start committing the register changes (joins with the DPL=CPL branch).
3494 */
3495 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3496 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3497 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3498 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3499 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3500 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3501 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3502 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3503 * SP is loaded).
3504 * Need to check the other combinations too:
3505 * - 16-bit TSS, 32-bit handler
3506 * - 32-bit TSS, 16-bit handler */
3507 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3508 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3509 else
3510 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3511
3512 if (fEfl & X86_EFL_VM)
3513 {
3514 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3515 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3516 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3517 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3518 }
3519 }
3520 /*
3521 * Same privilege, no stack change and smaller stack frame.
3522 */
3523 else
3524 {
3525 uint64_t uNewRsp;
3526 uint8_t bUnmapInfoStackFrame;
3527 RTPTRUNION uStackFrame;
3528 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3529 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
3530 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
3531 if (rcStrict != VINF_SUCCESS)
3532 return rcStrict;
3533
3534 if (f32BitGate)
3535 {
3536 if (fFlags & IEM_XCPT_FLAGS_ERR)
3537 *uStackFrame.pu32++ = uErr;
3538 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3539 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3540 uStackFrame.pu32[2] = fEfl;
3541 }
3542 else
3543 {
3544 if (fFlags & IEM_XCPT_FLAGS_ERR)
3545 *uStackFrame.pu16++ = uErr;
3546 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3547 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3548 uStackFrame.pu16[2] = fEfl;
3549 }
3550 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
3551 if (rcStrict != VINF_SUCCESS)
3552 return rcStrict;
3553
3554 /* Mark the CS selector as 'accessed'. */
3555 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3556 {
3557 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3558 if (rcStrict != VINF_SUCCESS)
3559 return rcStrict;
3560 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3561 }
3562
3563 /*
3564 * Start committing the register changes (joins with the other branch).
3565 */
3566 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3567 }
3568
3569 /* ... register committing continues. */
3570 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3571 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3572 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3573 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3574 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3575 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3576
3577 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3578 fEfl &= ~fEflToClear;
3579 IEMMISC_SET_EFL(pVCpu, fEfl);
3580
3581 if (fFlags & IEM_XCPT_FLAGS_CR2)
3582 pVCpu->cpum.GstCtx.cr2 = uCr2;
3583
3584 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3585 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3586
3587 /* Make sure the execution flags are correct. */
3588 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3589 if (fExecNew != pVCpu->iem.s.fExec)
3590 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3591 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3592 pVCpu->iem.s.fExec = fExecNew;
3593 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3594
3595 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3596}
3597
3598
3599/**
3600 * Implements exceptions and interrupts for long mode.
3601 *
3602 * @returns VBox strict status code.
3603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3604 * @param cbInstr The number of bytes to offset rIP by in the return
3605 * address.
3606 * @param u8Vector The interrupt / exception vector number.
3607 * @param fFlags The flags.
3608 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3609 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3610 */
3611static VBOXSTRICTRC
3612iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3613 uint8_t cbInstr,
3614 uint8_t u8Vector,
3615 uint32_t fFlags,
3616 uint16_t uErr,
3617 uint64_t uCr2) RT_NOEXCEPT
3618{
3619 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3620
3621 /*
3622 * Read the IDT entry.
3623 */
3624 uint16_t offIdt = (uint16_t)u8Vector << 4;
3625 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3626 {
3627 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3628 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3629 }
3630 X86DESC64 Idte;
3631#ifdef _MSC_VER /* Shut up silly compiler warning. */
3632 Idte.au64[0] = 0;
3633 Idte.au64[1] = 0;
3634#endif
3635 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3636 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3637 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3638 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3639 {
3640 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3641 return rcStrict;
3642 }
3643 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3644 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3645 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3646
3647 /*
3648 * Check the descriptor type, DPL and such.
3649 * ASSUMES this is done in the same order as described for call-gate calls.
3650 */
3651 if (Idte.Gate.u1DescType)
3652 {
3653 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3654 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3655 }
3656 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3657 switch (Idte.Gate.u4Type)
3658 {
3659 case AMD64_SEL_TYPE_SYS_INT_GATE:
3660 fEflToClear |= X86_EFL_IF;
3661 break;
3662 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3663 break;
3664
3665 default:
3666 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3667 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3668 }
3669
3670 /* Check DPL against CPL if applicable. */
3671 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3672 {
3673 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3674 {
3675 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3676 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3677 }
3678 }
3679
3680 /* Is it there? */
3681 if (!Idte.Gate.u1Present)
3682 {
3683 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3684 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3685 }
3686
3687 /* A null CS is bad. */
3688 RTSEL NewCS = Idte.Gate.u16Sel;
3689 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3690 {
3691 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3692 return iemRaiseGeneralProtectionFault0(pVCpu);
3693 }
3694
3695 /* Fetch the descriptor for the new CS. */
3696 IEMSELDESC DescCS;
3697 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3698 if (rcStrict != VINF_SUCCESS)
3699 {
3700 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3701 return rcStrict;
3702 }
3703
3704 /* Must be a 64-bit code segment. */
3705 if (!DescCS.Long.Gen.u1DescType)
3706 {
3707 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3708 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3709 }
3710 if ( !DescCS.Long.Gen.u1Long
3711 || DescCS.Long.Gen.u1DefBig
3712 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3713 {
3714 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3715 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3716 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3717 }
3718
3719 /* Don't allow lowering the privilege level. For non-conforming CS
3720 selectors, the CS.DPL sets the privilege level the trap/interrupt
3721 handler runs at. For conforming CS selectors, the CPL remains
3722 unchanged, but the CS.DPL must be <= CPL. */
3723 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3724 * when CPU in Ring-0. Result \#GP? */
3725 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3726 {
3727 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3728 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3729 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3730 }
3731
3732
3733 /* Make sure the selector is present. */
3734 if (!DescCS.Legacy.Gen.u1Present)
3735 {
3736 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3737 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3738 }
3739
3740 /* Check that the new RIP is canonical. */
3741 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3742 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3743 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3744 if (!IEM_IS_CANONICAL(uNewRip))
3745 {
3746 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3747 return iemRaiseGeneralProtectionFault0(pVCpu);
3748 }
3749
3750 /*
3751 * If the privilege level changes or if the IST isn't zero, we need to get
3752 * a new stack from the TSS.
3753 */
3754 uint64_t uNewRsp;
3755 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3756 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3757 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3758 || Idte.Gate.u3IST != 0)
3759 {
3760 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3761 if (rcStrict != VINF_SUCCESS)
3762 return rcStrict;
3763 }
3764 else
3765 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3766 uNewRsp &= ~(uint64_t)0xf;
3767
3768 /*
3769 * Calc the flag image to push.
3770 */
3771 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3772 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3773 fEfl &= ~X86_EFL_RF;
3774 else
3775 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3776
3777 /*
3778 * Start making changes.
3779 */
3780 /* Set the new CPL so that stack accesses use it. */
3781 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3782 IEM_SET_CPL(pVCpu, uNewCpl);
3783/** @todo Setting CPL this early seems wrong as it would affect and errors we
3784 * raise accessing the stack and (?) GDT/LDT... */
3785
3786 /* Create the stack frame. */
3787 uint8_t bUnmapInfoStackFrame;
3788 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3789 RTPTRUNION uStackFrame;
3790 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
3791 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3792 if (rcStrict != VINF_SUCCESS)
3793 return rcStrict;
3794
3795 if (fFlags & IEM_XCPT_FLAGS_ERR)
3796 *uStackFrame.pu64++ = uErr;
3797 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3798 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3799 uStackFrame.pu64[2] = fEfl;
3800 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3801 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3802 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
3803 if (rcStrict != VINF_SUCCESS)
3804 return rcStrict;
3805
3806 /* Mark the CS selectors 'accessed' (hope this is the correct time). */
3807 /** @todo testcase: excatly _when_ are the accessed bits set - before or
3808 * after pushing the stack frame? (Write protect the gdt + stack to
3809 * find out.) */
3810 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3811 {
3812 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3813 if (rcStrict != VINF_SUCCESS)
3814 return rcStrict;
3815 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3816 }
3817
3818 /*
3819 * Start comitting the register changes.
3820 */
3821 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3822 * hidden registers when interrupting 32-bit or 16-bit code! */
3823 if (uNewCpl != uOldCpl)
3824 {
3825 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3826 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3827 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3828 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3829 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3830 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3831 }
3832 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3833 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3834 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3835 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3836 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3837 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3838 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3839 pVCpu->cpum.GstCtx.rip = uNewRip;
3840
3841 fEfl &= ~fEflToClear;
3842 IEMMISC_SET_EFL(pVCpu, fEfl);
3843
3844 if (fFlags & IEM_XCPT_FLAGS_CR2)
3845 pVCpu->cpum.GstCtx.cr2 = uCr2;
3846
3847 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3848 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3849
3850 iemRecalcExecModeAndCplFlags(pVCpu);
3851
3852 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3853}
3854
3855
/**
 * Implements exceptions and interrupts.
 *
 * All exceptions and interrupts goes thru this function!
 *
 * It upgrades V8086 software interrupts with bad IOPL to \#GP(0), gives DBGF
 * and the nested-virtualization (VMX/SVM) intercept logic first crack at the
 * event, evaluates recursive exception sequences (\#DF / triple fault), does
 * statistics and logging, and finally hands the event to the mode-specific
 * worker (real, protected or long mode).
 *
 * @returns VBox strict status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr     The number of bytes to offset rIP by in the return
 *                      address.
 * @param   u8Vector    The interrupt / exception vector number.
 * @param   fFlags      The flags.
 * @param   uErr        The error value if IEM_XCPT_FLAGS_ERR is set.
 * @param   uCr2        The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
 */
VBOXSTRICTRC
iemRaiseXcptOrInt(PVMCPUCC    pVCpu,
                  uint8_t     cbInstr,
                  uint8_t     u8Vector,
                  uint32_t    fFlags,
                  uint16_t    uErr,
                  uint64_t    uCr2) RT_NOEXCEPT
{
    /*
     * Get all the state that we might need here.
     */
    IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);

#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
    /*
     * Flush prefetch buffer
     */
    pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
#endif

    /*
     * Perform the V8086 IOPL check and upgrade the fault without nesting.
     * A software INT n in V8086 mode with IOPL < 3 raises #GP(0) instead of
     * going through the IDT (INT3/INTO/ICEBP are exempt).
     */
    if (   pVCpu->cpum.GstCtx.eflags.Bits.u1VM
        && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
        && (fFlags & (  IEM_XCPT_FLAGS_T_SOFT_INT
                      | IEM_XCPT_FLAGS_BP_INSTR
                      | IEM_XCPT_FLAGS_ICEBP_INSTR
                      | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
        && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
    {
        Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
        fFlags   = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
        u8Vector = X86_XCPT_GP;
        uErr     = 0;
    }

    PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
#ifdef DBGFTRACE_ENABLED
    RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
                      pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
                      pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
#endif

    /*
     * Check if DBGF wants to intercept the exception.
     * Only hardware exceptions are eligible; external/software interrupts
     * skip this check.
     */
    if (   (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
        || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
                                                         DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    /*
     * Evaluate whether NMI blocking should be in effect.
     * Normally, NMI blocking is in effect whenever we inject an NMI.
     */
    bool fBlockNmi = u8Vector == X86_XCPT_NMI
                  && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    {
        /* Give the nested-guest VMCS event intercepts a chance to trigger a VM-exit first. */
        VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
        if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
            return rcStrict0;

        /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
        if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
        {
            Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
            fBlockNmi = false;
        }
    }
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
    {
        /*
         * If the event is being injected as part of VMRUN, it isn't subject to event
         * intercepts in the nested-guest. However, secondary exceptions that occur
         * during injection of any event -are- subject to exception intercepts.
         *
         * See AMD spec. 15.20 "Event Injection".
         */
        if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
            pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
        else
        {
            /*
             * Check and handle if the event being raised is intercepted.
             */
            VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
            if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
                return rcStrict0;
        }
    }
#endif

    /*
     * Set NMI blocking if necessary.
     */
    if (fBlockNmi)
        CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);

    /*
     * Do recursion accounting.
     */
    uint8_t const  uPrevXcpt = pVCpu->iem.s.uCurXcpt;
    uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
    if (pVCpu->iem.s.cXcptRecursions == 0)
        Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
             u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
    else
    {
        Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
             u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
             pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));

        /* Guard against runaway exception nesting (real CPUs triple-fault long before this). */
        if (pVCpu->iem.s.cXcptRecursions >= 4)
        {
#ifdef DEBUG_bird
            AssertFailed();
#endif
            IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
        }

        /*
         * Evaluate the sequence of recurring events.
         */
        IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
                                                         NULL /* pXcptRaiseInfo */);
        if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
        { /* likely */ }
        else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
        {
            /* Upgrade the event to a #DF with a zero error code. */
            Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
            fFlags   = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
            u8Vector = X86_XCPT_DF;
            uErr     = 0;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
            /* VMX nested-guest #DF intercept needs to be checked here. */
            if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
            {
                VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
                if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
                    return rcStrict0;
            }
#endif
            /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
            if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
                IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
        }
        else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
        {
            Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
            return iemInitiateCpuShutdown(pVCpu);
        }
        else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
        {
            /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
            Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
            if (   !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
                && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
                return VERR_EM_GUEST_CPU_HANG;
        }
        else
        {
            AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
                             enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
            return VERR_IEM_IPE_9;
        }

        /*
         * The 'EXT' bit is set when an exception occurs during deliver of an external
         * event (such as an interrupt or earlier exception)[1]. Privileged software
         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by
         * software interrupts and the INTO/INT3 instructions, the 'EXT' bit will not
         * be set[3].
         *
         * [1] - Intel spec. 6.13 "Error Code"
         * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
         * [3] - Intel Instruction reference for INT n.
         */
        if (   (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
            && (fFlags & IEM_XCPT_FLAGS_ERR)
            && u8Vector != X86_XCPT_PF
            && u8Vector != X86_XCPT_DF)
        {
            uErr |= X86_TRAP_ERR_EXTERNAL;
        }
    }

    /* Record the event being delivered; restored in the unwind section below. */
    pVCpu->iem.s.cXcptRecursions++;
    pVCpu->iem.s.uCurXcpt    = u8Vector;
    pVCpu->iem.s.fCurXcpt    = fFlags;
    pVCpu->iem.s.uCurXcptErr = uErr;
    pVCpu->iem.s.uCurXcptCr2 = uCr2;

    /*
     * Extensive logging.
     */
#if defined(LOG_ENABLED) && defined(IN_RING3)
    if (LogIs3Enabled())
    {
        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
                        "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
                        "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
                        "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
                        "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
                        "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
                        "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
                        "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
                        "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
                        "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
                        "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
                        "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
                        "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
                        "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
                        "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
                        "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
                        " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
                        " efer=%016VR{efer}\n"
                        " pat=%016VR{pat}\n"
                        " sf_mask=%016VR{sf_mask}\n"
                        "krnl_gs_base=%016VR{krnl_gs_base}\n"
                        " lstar=%016VR{lstar}\n"
                        " star=%016VR{star} cstar=%016VR{cstar}\n"
                        "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
                        );

        char szInstr[256];
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
    }
#endif /* LOG_ENABLED */

    /*
     * Stats.
     */
    uint64_t const uTimestamp = ASMReadTSC();
    if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
    {
        /* Interrupts (external or software). */
        STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
        EMHistoryAddExit(pVCpu,
                         fFlags & IEM_XCPT_FLAGS_T_EXT_INT
                         ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
                         : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
                         pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
    }
    else
    {
        /* CPU exceptions, with optional error code and CR2 history entries. */
        if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
        EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
                         pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
    }

    /*
     * #PF's implies a INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
     * to ensure that a stale TLB or paging cache entry will only cause one
     * spurious #PF.
     */
    if (   u8Vector == X86_XCPT_PF
        && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
        IEMTlbInvalidatePage(pVCpu, uCr2);

    /*
     * Call the mode specific worker function.
     */
    VBOXSTRICTRC rcStrict;
    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
        rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
    else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
        rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
    else
        rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);

    /* Flush the prefetch buffer. */
    iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));

    /*
     * Unwind.
     */
    pVCpu->iem.s.cXcptRecursions--;
    pVCpu->iem.s.uCurXcpt = uPrevXcpt;
    pVCpu->iem.s.fCurXcpt = fPrevXcpt;
    Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
         VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
         pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
    return rcStrict;
}
4177
4178#ifdef IEM_WITH_SETJMP
4179/**
4180 * See iemRaiseXcptOrInt. Will not return.
4181 */
4182DECL_NO_RETURN(void)
4183iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4184 uint8_t cbInstr,
4185 uint8_t u8Vector,
4186 uint32_t fFlags,
4187 uint16_t uErr,
4188 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4189{
4190 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4191 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4192}
4193#endif
4194
4195
4196/** \#DE - 00. */
4197VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4198{
4199 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4200}
4201
4202
4203#ifdef IEM_WITH_SETJMP
4204/** \#DE - 00. */
4205DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4206{
4207 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4208}
4209#endif
4210
4211
4212/** \#DB - 01.
4213 * @note This automatically clear DR7.GD. */
4214VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4215{
4216 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4217 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4218 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4219}
4220
4221
4222/** \#BR - 05. */
4223VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4224{
4225 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4226}
4227
4228
4229/** \#UD - 06. */
4230VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4231{
4232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4233}
4234
4235
4236#ifdef IEM_WITH_SETJMP
4237/** \#UD - 06. */
4238DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4239{
4240 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4241}
4242#endif
4243
4244
4245/** \#NM - 07. */
4246VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4247{
4248 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4249}
4250
4251
4252#ifdef IEM_WITH_SETJMP
4253/** \#NM - 07. */
4254DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4255{
4256 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4257}
4258#endif
4259
4260
4261/** \#TS(err) - 0a. */
4262VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4263{
4264 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4265}
4266
4267
4268/** \#TS(tr) - 0a. */
4269VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4270{
4271 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4272 pVCpu->cpum.GstCtx.tr.Sel, 0);
4273}
4274
4275
4276/** \#TS(0) - 0a. */
4277VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4278{
4279 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4280 0, 0);
4281}
4282
4283
4284/** \#TS(err) - 0a. */
4285VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4286{
4287 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4288 uSel & X86_SEL_MASK_OFF_RPL, 0);
4289}
4290
4291
4292/** \#NP(err) - 0b. */
4293VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4294{
4295 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4296}
4297
4298
4299/** \#NP(sel) - 0b. */
4300VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4301{
4302 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4303 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4304 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4305 uSel & ~X86_SEL_RPL, 0);
4306}
4307
4308
4309/** \#SS(seg) - 0c. */
4310VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4311{
4312 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
4313 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
4314 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4315 uSel & ~X86_SEL_RPL, 0);
4316}
4317
4318
4319/** \#SS(err) - 0c. */
4320VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4321{
4322 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
4323 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4324 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4325}
4326
4327
4328/** \#GP(n) - 0d. */
4329VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4330{
4331 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
4332 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4333}
4334
4335
4336/** \#GP(0) - 0d. */
4337VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4338{
4339 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4340 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4341}
4342
4343#ifdef IEM_WITH_SETJMP
4344/** \#GP(0) - 0d. */
4345DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4346{
4347 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4348 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4349}
4350#endif
4351
4352
4353/** \#GP(sel) - 0d. */
4354VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4355{
4356 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4357 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4358 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4359 Sel & ~X86_SEL_RPL, 0);
4360}
4361
4362
4363/** \#GP(0) - 0d. */
4364VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4365{
4366 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
4367 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4368}
4369
4370
4371/** \#GP(sel) - 0d. */
4372VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4373{
4374 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4375 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4376 NOREF(iSegReg); NOREF(fAccess);
4377 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4378 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4379}
4380
4381#ifdef IEM_WITH_SETJMP
4382/** \#GP(sel) - 0d, longjmp. */
4383DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4384{
4385 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4386 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4387 NOREF(iSegReg); NOREF(fAccess);
4388 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4389 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4390}
4391#endif
4392
4393/** \#GP(sel) - 0d. */
4394VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4395{
4396 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
4397 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4398 NOREF(Sel);
4399 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4400}
4401
4402#ifdef IEM_WITH_SETJMP
4403/** \#GP(sel) - 0d, longjmp. */
4404DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4405{
4406 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
4407 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
4408 NOREF(Sel);
4409 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4410}
4411#endif
4412
4413
4414/** \#GP(sel) - 0d. */
4415VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4416{
4417 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
4418 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
4419 NOREF(iSegReg); NOREF(fAccess);
4420 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4421}
4422
4423#ifdef IEM_WITH_SETJMP
4424/** \#GP(sel) - 0d, longjmp. */
4425DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4426{
4427 NOREF(iSegReg); NOREF(fAccess);
4428 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4429}
4430#endif
4431
4432
/** \#PF(n) - 0e.
 *
 * Builds the page-fault error code from the PGM status code and the access
 * flags, then raises \#PF with CR2 set to the faulting address.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   GCPtrWhere  The faulting guest address (becomes CR2, possibly
 *                      adjusted to the last byte for large accesses).
 * @param   cbAccess    The size of the access in bytes.
 * @param   fAccess     IEM_ACCESS_XXX flags describing the access.
 * @param   rc          The PGM/paging status code that triggered this fault.
 */
VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
{
    /* Translate the paging status code into the P (present) error-code bit. */
    uint16_t uErr;
    switch (rc)
    {
        case VERR_PAGE_NOT_PRESENT:
        case VERR_PAGE_TABLE_NOT_PRESENT:
        case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
        case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
            /* Not-present fault: P bit clear. */
            uErr = 0;
            break;

        default:
            AssertMsgFailed(("%Rrc\n", rc));
            RT_FALL_THRU();
        case VERR_ACCESS_DENIED:
            /* Protection violation: page was present. */
            uErr = X86_TRAP_PF_P;
            break;

        /** @todo reserved */
    }

    /* U/S bit: set when the access originates from ring 3. */
    if (IEM_GET_CPL(pVCpu) == 3)
        uErr |= X86_TRAP_PF_US;

    /* I/D bit: only reported for instruction fetches when PAE+NXE are enabled. */
    if (   (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
        && (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
            && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
        uErr |= X86_TRAP_PF_ID;

#if 0 /* This is so much non-sense, really.  Why was it done like that? */
    /* Note! RW access callers reporting a WRITE protection fault, will clear
             the READ flag before calling.  So, read-modify-write accesses (RW)
             can safely be reported as READ faults. */
    if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
        uErr |= X86_TRAP_PF_RW;
#else
    if (fAccess & IEM_ACCESS_TYPE_WRITE)
    {
        /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
        /// (regardless of outcome of the comparison in the latter case).
        //if (!(fAccess & IEM_ACCESS_TYPE_READ))
        uErr |= X86_TRAP_PF_RW;
    }
#endif

    /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
       of the memory operand rather than at the start of it. (Not sure what
       happens if it crosses a page boundrary.)  The current heuristics for
       this is to report the #PF for the last byte if the access is more than
       64 bytes.  This is probably not correct, but we can work that out later,
       main objective now is to get FXSAVE to work like for real hardware and
       make bs3-cpu-basic2 work. */
    if (cbAccess <= 64)
    { /* likely*/ }
    else
        GCPtrWhere += cbAccess - 1;

    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
                             uErr, GCPtrWhere);
}
4495
4496#ifdef IEM_WITH_SETJMP
4497/** \#PF(n) - 0e, longjmp. */
4498DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4499 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4500{
4501 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4502}
4503#endif
4504
4505
4506/** \#MF(0) - 10. */
4507VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4508{
4509 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4510 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4511
4512 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4513 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4514 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4515}
4516
4517#ifdef IEM_WITH_SETJMP
4518/** \#MF(0) - 10, longjmp. */
4519DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4520{
4521 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
4522}
4523#endif
4524
4525
4526/** \#AC(0) - 11. */
4527VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4528{
4529 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4530}
4531
4532#ifdef IEM_WITH_SETJMP
4533/** \#AC(0) - 11, longjmp. */
4534DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4535{
4536 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4537}
4538#endif
4539
4540
4541/** \#XF(0)/\#XM(0) - 19. */
4542VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4543{
4544 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4545}
4546
4547
4548#ifdef IEM_WITH_SETJMP
4549/** \#XF(0)/\#XM(0) - 19s, longjmp. */
4550DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4551{
4552 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
4553}
4554#endif
4555
4556
4557/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4558IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4559{
4560 NOREF(cbInstr);
4561 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4562}
4563
4564
4565/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4566IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4567{
4568 NOREF(cbInstr);
4569 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4570}
4571
4572
4573/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4574IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4575{
4576 NOREF(cbInstr);
4577 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4578}
4579
4580
4581/** @} */
4582
4583/** @name Common opcode decoders.
4584 * @{
4585 */
4586//#include <iprt/mem.h>
4587
4588/**
4589 * Used to add extra details about a stub case.
4590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4591 */
4592void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4593{
4594#if defined(LOG_ENABLED) && defined(IN_RING3)
4595 PVM pVM = pVCpu->CTX_SUFF(pVM);
4596 char szRegs[4096];
4597 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4598 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4599 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4600 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4601 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4602 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4603 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4604 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4605 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4606 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4607 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4608 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4609 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4610 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4611 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4612 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4613 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4614 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4615 " efer=%016VR{efer}\n"
4616 " pat=%016VR{pat}\n"
4617 " sf_mask=%016VR{sf_mask}\n"
4618 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4619 " lstar=%016VR{lstar}\n"
4620 " star=%016VR{star} cstar=%016VR{cstar}\n"
4621 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4622 );
4623
4624 char szInstr[256];
4625 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4626 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4627 szInstr, sizeof(szInstr), NULL);
4628
4629 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4630#else
4631 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
4632#endif
4633}
4634
4635/** @} */
4636
4637
4638
4639/** @name Register Access.
4640 * @{
4641 */
4642
4643/**
4644 * Adds a 8-bit signed jump offset to RIP/EIP/IP.
4645 *
4646 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4647 * segment limit.
4648 *
4649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4650 * @param cbInstr Instruction size.
4651 * @param offNextInstr The offset of the next instruction.
4652 * @param enmEffOpSize Effective operand size.
4653 */
4654VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4655 IEMMODE enmEffOpSize) RT_NOEXCEPT
4656{
4657 switch (enmEffOpSize)
4658 {
4659 case IEMMODE_16BIT:
4660 {
4661 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4662 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4663 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4664 pVCpu->cpum.GstCtx.rip = uNewIp;
4665 else
4666 return iemRaiseGeneralProtectionFault0(pVCpu);
4667 break;
4668 }
4669
4670 case IEMMODE_32BIT:
4671 {
4672 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4673 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4674
4675 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4676 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4677 pVCpu->cpum.GstCtx.rip = uNewEip;
4678 else
4679 return iemRaiseGeneralProtectionFault0(pVCpu);
4680 break;
4681 }
4682
4683 case IEMMODE_64BIT:
4684 {
4685 Assert(IEM_IS_64BIT_CODE(pVCpu));
4686
4687 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4688 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4689 pVCpu->cpum.GstCtx.rip = uNewRip;
4690 else
4691 return iemRaiseGeneralProtectionFault0(pVCpu);
4692 break;
4693 }
4694
4695 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4696 }
4697
4698#ifndef IEM_WITH_CODE_TLB
4699 /* Flush the prefetch buffer. */
4700 pVCpu->iem.s.cbOpcode = cbInstr;
4701#endif
4702
4703 /*
4704 * Clear RF and finish the instruction (maybe raise #DB).
4705 */
4706 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4707}
4708
4709
4710/**
4711 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4712 *
4713 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4714 * segment limit.
4715 *
4716 * @returns Strict VBox status code.
4717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4718 * @param cbInstr Instruction size.
4719 * @param offNextInstr The offset of the next instruction.
4720 */
4721VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4722{
4723 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4724
4725 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4726 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4727 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4728 pVCpu->cpum.GstCtx.rip = uNewIp;
4729 else
4730 return iemRaiseGeneralProtectionFault0(pVCpu);
4731
4732#ifndef IEM_WITH_CODE_TLB
4733 /* Flush the prefetch buffer. */
4734 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4735#endif
4736
4737 /*
4738 * Clear RF and finish the instruction (maybe raise #DB).
4739 */
4740 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4741}
4742
4743
4744/**
4745 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4746 *
4747 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4748 * segment limit.
4749 *
4750 * @returns Strict VBox status code.
4751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4752 * @param cbInstr Instruction size.
4753 * @param offNextInstr The offset of the next instruction.
4754 * @param enmEffOpSize Effective operand size.
4755 */
4756VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4757 IEMMODE enmEffOpSize) RT_NOEXCEPT
4758{
4759 if (enmEffOpSize == IEMMODE_32BIT)
4760 {
4761 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4762
4763 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4764 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4765 pVCpu->cpum.GstCtx.rip = uNewEip;
4766 else
4767 return iemRaiseGeneralProtectionFault0(pVCpu);
4768 }
4769 else
4770 {
4771 Assert(enmEffOpSize == IEMMODE_64BIT);
4772
4773 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4774 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4775 pVCpu->cpum.GstCtx.rip = uNewRip;
4776 else
4777 return iemRaiseGeneralProtectionFault0(pVCpu);
4778 }
4779
4780#ifndef IEM_WITH_CODE_TLB
4781 /* Flush the prefetch buffer. */
4782 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4783#endif
4784
4785 /*
4786 * Clear RF and finish the instruction (maybe raise #DB).
4787 */
4788 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4789}
4790
4791/** @} */
4792
4793
4794/** @name FPU access and helpers.
4795 *
4796 * @{
4797 */
4798
4799/**
4800 * Updates the x87.DS and FPUDP registers.
4801 *
4802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4803 * @param pFpuCtx The FPU context.
4804 * @param iEffSeg The effective segment register.
4805 * @param GCPtrEff The effective address relative to @a iEffSeg.
4806 */
4807DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4808{
4809 RTSEL sel;
4810 switch (iEffSeg)
4811 {
4812 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4813 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4814 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4815 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4816 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4817 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4818 default:
4819 AssertMsgFailed(("%d\n", iEffSeg));
4820 sel = pVCpu->cpum.GstCtx.ds.Sel;
4821 }
4822 /** @todo pFpuCtx->DS and FPUDP needs to be kept seperately. */
4823 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4824 {
4825 pFpuCtx->DS = 0;
4826 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4827 }
4828 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4829 {
4830 pFpuCtx->DS = sel;
4831 pFpuCtx->FPUDP = GCPtrEff;
4832 }
4833 else
4834 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4835}
4836
4837
4838/**
4839 * Rotates the stack registers in the push direction.
4840 *
4841 * @param pFpuCtx The FPU context.
4842 * @remarks This is a complete waste of time, but fxsave stores the registers in
4843 * stack order.
4844 */
4845DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4846{
4847 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4848 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4849 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4850 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4851 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4852 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4853 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4854 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4855 pFpuCtx->aRegs[0].r80 = r80Tmp;
4856}
4857
4858
4859/**
4860 * Rotates the stack registers in the pop direction.
4861 *
4862 * @param pFpuCtx The FPU context.
4863 * @remarks This is a complete waste of time, but fxsave stores the registers in
4864 * stack order.
4865 */
4866DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4867{
4868 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4869 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4870 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4871 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4872 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4873 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4874 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4875 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4876 pFpuCtx->aRegs[7].r80 = r80Tmp;
4877}
4878
4879
4880/**
4881 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4882 * exception prevents it.
4883 *
4884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4885 * @param pResult The FPU operation result to push.
4886 * @param pFpuCtx The FPU context.
4887 */
4888static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4889{
4890 /* Update FSW and bail if there are pending exceptions afterwards. */
4891 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4892 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4893 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4894 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4895 {
4896 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FCW & X86_FSW_ES))
4897 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4898 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4899 pFpuCtx->FSW = fFsw;
4900 return;
4901 }
4902
4903 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4904 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4905 {
4906 /* All is fine, push the actual value. */
4907 pFpuCtx->FTW |= RT_BIT(iNewTop);
4908 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4909 }
4910 else if (pFpuCtx->FCW & X86_FCW_IM)
4911 {
4912 /* Masked stack overflow, push QNaN. */
4913 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4914 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4915 }
4916 else
4917 {
4918 /* Raise stack overflow, don't push anything. */
4919 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4920 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4921 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4922 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4923 return;
4924 }
4925
4926 fFsw &= ~X86_FSW_TOP_MASK;
4927 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4928 pFpuCtx->FSW = fFsw;
4929
4930 iemFpuRotateStackPush(pFpuCtx);
4931 RT_NOREF(pVCpu);
4932}
4933
4934
4935/**
4936 * Stores a result in a FPU register and updates the FSW and FTW.
4937 *
4938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4939 * @param pFpuCtx The FPU context.
4940 * @param pResult The result to store.
4941 * @param iStReg Which FPU register to store it in.
4942 */
4943static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4944{
4945 Assert(iStReg < 8);
4946 uint16_t fNewFsw = pFpuCtx->FSW;
4947 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4948 fNewFsw &= ~X86_FSW_C_MASK;
4949 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4950 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4951 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4952 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4953 pFpuCtx->FSW = fNewFsw;
4954 pFpuCtx->FTW |= RT_BIT(iReg);
4955 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4956 RT_NOREF(pVCpu);
4957}
4958
4959
4960/**
4961 * Only updates the FPU status word (FSW) with the result of the current
4962 * instruction.
4963 *
4964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4965 * @param pFpuCtx The FPU context.
4966 * @param u16FSW The FSW output of the current instruction.
4967 */
4968static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4969{
4970 uint16_t fNewFsw = pFpuCtx->FSW;
4971 fNewFsw &= ~X86_FSW_C_MASK;
4972 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4973 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4974 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4975 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4976 pFpuCtx->FSW = fNewFsw;
4977 RT_NOREF(pVCpu);
4978}
4979
4980
4981/**
4982 * Pops one item off the FPU stack if no pending exception prevents it.
4983 *
4984 * @param pFpuCtx The FPU context.
4985 */
4986static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4987{
4988 /* Check pending exceptions. */
4989 uint16_t uFSW = pFpuCtx->FSW;
4990 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4991 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4992 return;
4993
4994 /* TOP--. */
4995 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4996 uFSW &= ~X86_FSW_TOP_MASK;
4997 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4998 pFpuCtx->FSW = uFSW;
4999
5000 /* Mark the previous ST0 as empty. */
5001 iOldTop >>= X86_FSW_TOP_SHIFT;
5002 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
5003
5004 /* Rotate the registers. */
5005 iemFpuRotateStackPop(pFpuCtx);
5006}
5007
5008
5009/**
5010 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
5011 *
5012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5013 * @param pResult The FPU operation result to push.
5014 * @param uFpuOpcode The FPU opcode value.
5015 */
5016void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5017{
5018 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5019 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5020 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5021}
5022
5023
5024/**
5025 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
5026 * and sets FPUDP and FPUDS.
5027 *
5028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5029 * @param pResult The FPU operation result to push.
5030 * @param iEffSeg The effective segment register.
5031 * @param GCPtrEff The effective address relative to @a iEffSeg.
5032 * @param uFpuOpcode The FPU opcode value.
5033 */
5034void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5035 uint16_t uFpuOpcode) RT_NOEXCEPT
5036{
5037 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5038 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5039 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5040 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
5041}
5042
5043
/**
 * Replace ST0 with the first value and push the second onto the FPU stack,
 * unless a pending exception prevents it.
 *
 * Used by FPTAN, FSINCOS and FXTRACT style instructions that produce two
 * results (new ST0 + pushed value).
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pResult     The FPU operation result to store and push.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);

    /* Update FSW and bail if there are pending exceptions afterwards. */
    uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
    fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
    if (  (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
        & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
    {
        /* Unmasked exception: commit the merged FSW (logging the ES 0->1 edge)
           but leave TOP and the register stack untouched. */
        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
            Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
        pFpuCtx->FSW = fFsw;
        return;
    }

    /* New TOP after the push (TOP - 1 modulo 8). */
    uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
    if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
    {
        /* All is fine, push the actual value. */
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        pFpuCtx->aRegs[0].r80 = pResult->r80Result1; /* becomes ST1 after the rotate below */
        pFpuCtx->aRegs[7].r80 = pResult->r80Result2; /* becomes the new ST0 after the rotate below */
    }
    else if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked stack overflow, push QNaN. */
        fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
        iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
    }
    else
    {
        /* Raise stack overflow, don't push anything. */
        pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
        Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
        return;
    }

    /* Commit the new TOP and rotate the register file to match. */
    fFsw &= ~X86_FSW_TOP_MASK;
    fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
    pFpuCtx->FSW = fFsw;

    iemFpuRotateStackPush(pFpuCtx);
}
5101
5102
5103/**
5104 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5105 * FOP.
5106 *
5107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5108 * @param pResult The result to store.
5109 * @param iStReg Which FPU register to store it in.
5110 * @param uFpuOpcode The FPU opcode value.
5111 */
5112void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5113{
5114 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5115 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5116 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5117}
5118
5119
5120/**
5121 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5122 * FOP, and then pops the stack.
5123 *
5124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5125 * @param pResult The result to store.
5126 * @param iStReg Which FPU register to store it in.
5127 * @param uFpuOpcode The FPU opcode value.
5128 */
5129void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5130{
5131 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5132 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5133 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5134 iemFpuMaybePopOne(pFpuCtx);
5135}
5136
5137
5138/**
5139 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5140 * FPUDP, and FPUDS.
5141 *
5142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5143 * @param pResult The result to store.
5144 * @param iStReg Which FPU register to store it in.
5145 * @param iEffSeg The effective memory operand selector register.
5146 * @param GCPtrEff The effective memory operand offset.
5147 * @param uFpuOpcode The FPU opcode value.
5148 */
5149void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5150 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5151{
5152 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5153 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5154 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5155 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5156}
5157
5158
5159/**
5160 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5161 * FPUDP, and FPUDS, and then pops the stack.
5162 *
5163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5164 * @param pResult The result to store.
5165 * @param iStReg Which FPU register to store it in.
5166 * @param iEffSeg The effective memory operand selector register.
5167 * @param GCPtrEff The effective memory operand offset.
5168 * @param uFpuOpcode The FPU opcode value.
5169 */
5170void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5171 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5172{
5173 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5174 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5175 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5176 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5177 iemFpuMaybePopOne(pFpuCtx);
5178}
5179
5180
5181/**
5182 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5183 *
5184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5185 * @param uFpuOpcode The FPU opcode value.
5186 */
5187void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5188{
5189 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5190 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5191}
5192
5193
5194/**
5195 * Updates the FSW, FOP, FPUIP, and FPUCS.
5196 *
5197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5198 * @param u16FSW The FSW from the current instruction.
5199 * @param uFpuOpcode The FPU opcode value.
5200 */
5201void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5202{
5203 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5204 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5205 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5206}
5207
5208
5209/**
5210 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5211 *
5212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5213 * @param u16FSW The FSW from the current instruction.
5214 * @param uFpuOpcode The FPU opcode value.
5215 */
5216void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5217{
5218 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5219 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5220 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5221 iemFpuMaybePopOne(pFpuCtx);
5222}
5223
5224
5225/**
5226 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5227 *
5228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5229 * @param u16FSW The FSW from the current instruction.
5230 * @param iEffSeg The effective memory operand selector register.
5231 * @param GCPtrEff The effective memory operand offset.
5232 * @param uFpuOpcode The FPU opcode value.
5233 */
5234void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5235{
5236 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5237 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5238 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5239 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5240}
5241
5242
5243/**
5244 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5245 *
5246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5247 * @param u16FSW The FSW from the current instruction.
5248 * @param uFpuOpcode The FPU opcode value.
5249 */
5250void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5251{
5252 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5253 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5254 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5255 iemFpuMaybePopOne(pFpuCtx);
5256 iemFpuMaybePopOne(pFpuCtx);
5257}
5258
5259
5260/**
5261 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5262 *
5263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5264 * @param u16FSW The FSW from the current instruction.
5265 * @param iEffSeg The effective memory operand selector register.
5266 * @param GCPtrEff The effective memory operand offset.
5267 * @param uFpuOpcode The FPU opcode value.
5268 */
5269void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5270 uint16_t uFpuOpcode) RT_NOEXCEPT
5271{
5272 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5273 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5274 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5275 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5276 iemFpuMaybePopOne(pFpuCtx);
5277}
5278
5279
5280/**
5281 * Worker routine for raising an FPU stack underflow exception.
5282 *
5283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5284 * @param pFpuCtx The FPU context.
5285 * @param iStReg The stack register being accessed.
5286 */
5287static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5288{
5289 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5290 if (pFpuCtx->FCW & X86_FCW_IM)
5291 {
5292 /* Masked underflow. */
5293 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5294 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5295 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5296 if (iStReg != UINT8_MAX)
5297 {
5298 pFpuCtx->FTW |= RT_BIT(iReg);
5299 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5300 }
5301 }
5302 else
5303 {
5304 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5305 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5306 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5307 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5308 }
5309 RT_NOREF(pVCpu);
5310}
5311
5312
5313/**
5314 * Raises a FPU stack underflow exception.
5315 *
5316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5317 * @param iStReg The destination register that should be loaded
5318 * with QNaN if \#IS is not masked. Specify
5319 * UINT8_MAX if none (like for fcom).
5320 * @param uFpuOpcode The FPU opcode value.
5321 */
5322void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5323{
5324 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5325 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5326 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5327}
5328
5329
5330void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5331{
5332 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5333 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5334 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5335 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5336}
5337
5338
5339void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5340{
5341 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5342 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5343 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5344 iemFpuMaybePopOne(pFpuCtx);
5345}
5346
5347
5348void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5349 uint16_t uFpuOpcode) RT_NOEXCEPT
5350{
5351 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5352 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5353 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5354 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5355 iemFpuMaybePopOne(pFpuCtx);
5356}
5357
5358
5359void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5360{
5361 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5362 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5363 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5364 iemFpuMaybePopOne(pFpuCtx);
5365 iemFpuMaybePopOne(pFpuCtx);
5366}
5367
5368
/**
 * Raises a FPU stack underflow exception for an instruction that pushes its
 * result, updating FOP, FPUIP and FPUCS.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);

    if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked underflow - Push QNaN.  (Comment previously said "overflow".) */
        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80); /* becomes the new ST0 after the rotate */
        iemFpuRotateStackPush(pFpuCtx);
    }
    else
    {
        /* Exception pending - don't change TOP or the register stack. */
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
        Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
    }
}
5394
5395
/**
 * Raises a FPU stack underflow exception for an instruction that replaces ST0
 * and pushes a second result, updating FOP, FPUIP and FPUCS.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   uFpuOpcode  The FPU opcode value.
 */
void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
{
    PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);

    if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked underflow - Push QNaN.  (Comment previously said "overflow".) */
        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80); /* becomes ST1 after the rotate */
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80); /* becomes the new ST0 after the rotate */
        iemFpuRotateStackPush(pFpuCtx);
    }
    else
    {
        /* Exception pending - don't change TOP or the register stack. */
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
        Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
    }
}
5422
5423
/**
 * Worker routine for raising an FPU stack overflow exception on a push.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pFpuCtx     The FPU context.
 */
static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
{
    if (pFpuCtx->FCW & X86_FCW_IM)
    {
        /* Masked overflow: advance TOP, set #IE/SF/C1 and push the indefinite QNaN. */
        uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
        pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
        pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
        pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
        pFpuCtx->FTW |= RT_BIT(iNewTop);
        iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80); /* becomes the new ST0 after the rotate */
        iemFpuRotateStackPush(pFpuCtx);
    }
    else
    {
        /* Exception pending - don't change TOP or the register stack. */
        pFpuCtx->FSW &= ~X86_FSW_C_MASK;
        pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
        Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
    }
    RT_NOREF(pVCpu); /* only used for logging */
}
5453
5454
5455/**
5456 * Raises a FPU stack overflow exception on a push.
5457 *
5458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5459 * @param uFpuOpcode The FPU opcode value.
5460 */
5461void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5462{
5463 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5464 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5465 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5466}
5467
5468
5469/**
5470 * Raises a FPU stack overflow exception on a push with a memory operand.
5471 *
5472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5473 * @param iEffSeg The effective memory operand selector register.
5474 * @param GCPtrEff The effective memory operand offset.
5475 * @param uFpuOpcode The FPU opcode value.
5476 */
5477void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5478{
5479 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5480 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5481 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5482 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5483}
5484
5485/** @} */
5486
5487
5488/** @name SSE+AVX SIMD access and helpers.
5489 *
5490 * @{
5491 */
5492/**
5493 * Stores a result in a SIMD XMM register, updates the MXCSR.
5494 *
5495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5496 * @param pResult The result to store.
5497 * @param iXmmReg Which SIMD XMM register to store the result in.
5498 */
5499void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5500{
5501 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5502 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5503
5504 /* The result is only updated if there is no unmasked exception pending. */
5505 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5506 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5507 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5508}
5509
5510
5511/**
5512 * Updates the MXCSR.
5513 *
5514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5515 * @param fMxcsr The new MXCSR value.
5516 */
5517void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5518{
5519 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5520 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5521}
5522/** @} */
5523
5524
5525/** @name Memory access.
5526 *
5527 * @{
5528 */
5529
5530#undef LOG_GROUP
5531#define LOG_GROUP LOG_GROUP_IEM_MEM
5532
5533/**
5534 * Updates the IEMCPU::cbWritten counter if applicable.
5535 *
5536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5537 * @param fAccess The access being accounted for.
5538 * @param cbMem The access size.
5539 */
5540DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5541{
5542 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5543 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5544 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5545}
5546
5547
/**
 * Applies the segment limit, base and attributes.
 *
 * This may raise a \#GP or \#SS.
 *
 * @returns VBox strict status code.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   fAccess     The kind of access which is being performed.
 * @param   iSegReg     The index of the segment register to apply.
 *                      This is UINT8_MAX if none (for IDT, GDT, LDT,
 *                      TSS, ++).
 * @param   cbMem       The access size.
 * @param   pGCPtrMem   Pointer to the guest memory address to apply
 *                      segmentation to.  Input and output parameter.
 */
VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
{
    /* UINT8_MAX = no segmentation (IDT/GDT/LDT/TSS accesses) - address is used as-is. */
    if (iSegReg == UINT8_MAX)
        return VINF_SUCCESS;

    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
    switch (IEM_GET_CPU_MODE(pVCpu))
    {
        case IEMMODE_16BIT:
        case IEMMODE_32BIT:
        {
            /* First and last byte of the access, for limit checking. */
            RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
            RTGCPTR32 GCPtrLast32  = GCPtrFirst32 + (uint32_t)cbMem - 1;

            if (   pSel->Attr.n.u1Present
                && !pSel->Attr.n.u1Unusable)
            {
                Assert(pSel->Attr.n.u1DescType);
                if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
                {
                    /* Data selector: writes require a writable segment. */
                    if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
                        && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
                        return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);

                    if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
                    {
                        /** @todo CPL check. */
                    }

                    /*
                     * There are two kinds of data selectors, normal and expand down.
                     */
                    if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
                    {
                        if (   GCPtrFirst32 > pSel->u32Limit
                            || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
                            return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
                    }
                    else
                    {
                        /*
                         * The upper boundary is defined by the B bit, not the G bit!
                         */
                        if (   GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
                            || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
                            return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
                    }
                    *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
                }
                else
                {
                    /*
                     * Code selectors can usually be used to read thru; writing is
                     * only permitted in real and V8086 mode.
                     */
                    if (   (   (fAccess & IEM_ACCESS_TYPE_WRITE)
                            || (   (fAccess & IEM_ACCESS_TYPE_READ)
                                && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
                        && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
                        return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);

                    if (   GCPtrFirst32 > pSel->u32Limit
                        || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
                        return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);

                    if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
                    {
                        /** @todo CPL check. */
                    }

                    *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
                }
            }
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* In long mode only FS and GS apply a base; no limit checks are
               done here, only canonicality of the unsegmented address. */
            RTGCPTR GCPtrMem = *pGCPtrMem;
            if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
                *pGCPtrMem = GCPtrMem + pSel->u64Base;

            Assert(cbMem >= 1);
            if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
                return VINF_SUCCESS;
            /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
             *        4.12.2 "Data Limit Checks in 64-bit Mode". */
            return iemRaiseGeneralProtectionFault0(pVCpu);
        }

        default:
            AssertFailedReturn(VERR_IEM_IPE_7);
    }
}
5661
5662
/**
 * Translates a virtual address to a physical address and checks if we
 * can access the page as specified.
 *
 * @returns VBox strict status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   GCPtrMem    The virtual address.
 * @param   cbAccess    The access size, for raising \#PF correctly for
 *                      FXSAVE and such.
 * @param   fAccess     The intended access.
 * @param   pGCPhysMem  Where to return the physical address.
 */
VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
                                               uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
{
    /** @todo Need a different PGM interface here.  We're currently using
     *        generic / REM interfaces. this won't cut it for R0. */
    /** @todo If/when PGM handles paged real-mode, we can remove the hack in
     *        iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
     *        here. */
    PGMPTWALK Walk;
    int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
    if (RT_FAILURE(rc))
    {
        /* Page table walk failed: raise #PF (or an EPT exit for nested VMX). */
        LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
        /** @todo Check unassigned memory in unpaged mode. */
        /** @todo Reserved bits in page tables. Requires new PGM interface. */
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        if (Walk.fFailed & PGM_WALKFAIL_EPT)
            IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
#endif
        *pGCPhysMem = NIL_RTGCPHYS;
        return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
    }

    /* If the page is writable and does not have the no-exec bit set, all
       access is allowed.  Otherwise we'll have to check more carefully... */
    if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
    {
        /* Write to read only memory? */
        if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
            && !(Walk.fEffective & X86_PTE_RW)
            && (   (   IEM_GET_CPL(pVCpu) == 3
                    && !(fAccess & IEM_ACCESS_WHAT_SYS))
                || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
        {
            LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
            *pGCPhysMem = NIL_RTGCPHYS;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            if (Walk.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
#endif
            return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
        }

        /* Kernel memory accessed by userland? */
        if (   !(Walk.fEffective & X86_PTE_US)
            && IEM_GET_CPL(pVCpu) == 3
            && !(fAccess & IEM_ACCESS_WHAT_SYS))
        {
            LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
            *pGCPhysMem = NIL_RTGCPHYS;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            if (Walk.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
#endif
            return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
        }

        /* Executing non-executable memory? */
        if (   (fAccess & IEM_ACCESS_TYPE_EXEC)
            && (Walk.fEffective & X86_PTE_PAE_NX)
            && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
        {
            LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
            *pGCPhysMem = NIL_RTGCPHYS;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            if (Walk.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
#endif
            return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
                                     VERR_ACCESS_DENIED);
        }
    }

    /*
     * Set the dirty / access flags.
     * ASSUMES this is set when the address is translated rather than on commit...
     */
    /** @todo testcase: check when A and D bits are actually set by the CPU. */
    uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
    if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
    {
        int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
        AssertRC(rc2);
        /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
        Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
    }

    /* Combine the page frame address with the offset within the page. */
    RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
    *pGCPhysMem = GCPhys;
    return VINF_SUCCESS;
}
5765
5766#if 0 /*unused*/
/**
 * Looks up a memory mapping entry.
 *
 * (Currently compiled out via \#if 0 - unused.)
 *
 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pvMem       The memory address.
 * @param   fAccess     The access kind to match (IEM_ACCESS_WHAT/TYPE bits).
 */
DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
{
    Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
    if (   pVCpu->iem.s.aMemMappings[0].pv == pvMem
        && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
        return 0;
    if (   pVCpu->iem.s.aMemMappings[1].pv == pvMem
        && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
        return 1;
    if (   pVCpu->iem.s.aMemMappings[2].pv == pvMem
        && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
        return 2;
    return VERR_NOT_FOUND;
}
5790#endif
5791
5792/**
5793 * Finds a free memmap entry when using iNextMapping doesn't work.
5794 *
5795 * @returns Memory mapping index, 1024 on failure.
5796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5797 */
5798static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5799{
5800 /*
5801 * The easy case.
5802 */
5803 if (pVCpu->iem.s.cActiveMappings == 0)
5804 {
5805 pVCpu->iem.s.iNextMapping = 1;
5806 return 0;
5807 }
5808
5809 /* There should be enough mappings for all instructions. */
5810 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5811
5812 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5813 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5814 return i;
5815
5816 AssertFailedReturn(1024);
5817}
5818
5819
5820/**
5821 * Commits a bounce buffer that needs writing back and unmaps it.
5822 *
5823 * @returns Strict VBox status code.
5824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5825 * @param iMemMap The index of the buffer to commit.
5826 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5827 * Always false in ring-3, obviously.
5828 */
static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
{
    /* Only bounce buffered write mappings may be committed here. */
    Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
#ifdef IN_RING3
    Assert(!fPostponeFail);
    RT_NOREF_PV(fPostponeFail);
#endif

    /*
     * Do the writing.
     */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    /* Writes targeting unassigned memory are silently dropped (fUnassigned is
       set by iemMemBounceBufferMapPhys for VERR_PGM_PHYS_TLB_UNASSIGNED). */
    if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    {
        uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
        uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
        uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
        if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
        {
            /*
             * Carefully and efficiently dealing with access handler return
             * codes make this a little bloated.
             */
            VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
                                                 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                 pbBuf,
                                                 cbFirst,
                                                 PGMACCESSORIGIN_IEM);
            if (rcStrict == VINF_SUCCESS)
            {
                if (cbSecond)
                {
                    /* First write succeeded cleanly; now the (optional) second page. */
                    rcStrict = PGMPhysWrite(pVM,
                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                            pbBuf + cbFirst,
                                            cbSecond,
                                            PGMACCESSORIGIN_IEM);
                    if (rcStrict == VINF_SUCCESS)
                    { /* nothing */ }
                    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    {
                        /* Informational status from a handler; merge into the pass-up status. */
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#ifndef IN_RING3
                    else if (fPostponeFail)
                    {
                        /* Flag the 2nd write as pending and raise VMCPU_FF_IEM so ring-3 deals with it. */
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
                        VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                        return iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#endif
                    else
                    {
                        /* Hard failure on the second write; note the mapping entry is NOT freed. */
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        return rcStrict;
                    }
                }
            }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                /* First write returned an informational status. */
                if (!cbSecond)
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                }
                else
                {
                    VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
                                                          pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                          pbBuf + cbFirst,
                                                          cbSecond,
                                                          PGMACCESSORIGIN_IEM);
                    if (rcStrict2 == VINF_SUCCESS)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
                    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                    {
                        /* Both writes returned informational statuses; merge the two. */
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
                        PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#ifndef IN_RING3
                    else if (fPostponeFail)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
                        VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                        return iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#endif
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
                        return rcStrict2;
                    }
                }
            }
#ifndef IN_RING3
            else if (fPostponeFail)
            {
                /* First write failed hard; postpone one or both writes to ring-3. */
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                if (!cbSecond)
                    pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
                else
                    pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
                VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                return iemSetPassUpStatus(pVCpu, rcStrict);
            }
#endif
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                return rcStrict;
            }
        }
        else
        {
            /*
             * No access handlers, much simpler.
             */
            int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
            if (RT_SUCCESS(rc))
            {
                if (cbSecond)
                {
                    rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
                    if (RT_SUCCESS(rc))
                    { /* likely */ }
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
                        return rc;
                    }
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                return rc;
            }
        }
    }

#if defined(IEM_LOG_MEMORY_WRITES)
    /* Debug aid: log (up to 64 bytes of) what was written and stash a copy. */
    Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
          RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
        Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
              RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
              &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));

    size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    g_cbIemWrote = cbWrote;
    memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
#endif

    /*
     * Free the mapping entry.
     */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}
6033
6034
6035/**
6036 * iemMemMap worker that deals with a request crossing pages.
6037 */
6038static VBOXSTRICTRC
6039iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
6040 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
6041{
6042 Assert(cbMem <= GUEST_PAGE_SIZE);
6043
6044 /*
6045 * Do the address translations.
6046 */
6047 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
6048 RTGCPHYS GCPhysFirst;
6049 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6050 if (rcStrict != VINF_SUCCESS)
6051 return rcStrict;
6052 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6053
6054 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
6055 RTGCPHYS GCPhysSecond;
6056 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6057 cbSecondPage, fAccess, &GCPhysSecond);
6058 if (rcStrict != VINF_SUCCESS)
6059 return rcStrict;
6060 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6061 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6062
6063 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6064
6065 /*
6066 * Read in the current memory content if it's a read, execute or partial
6067 * write access.
6068 */
6069 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6070
6071 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6072 {
6073 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6074 {
6075 /*
6076 * Must carefully deal with access handler status codes here,
6077 * makes the code a bit bloated.
6078 */
6079 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6080 if (rcStrict == VINF_SUCCESS)
6081 {
6082 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6083 if (rcStrict == VINF_SUCCESS)
6084 { /*likely */ }
6085 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6086 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6087 else
6088 {
6089 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
6090 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6091 return rcStrict;
6092 }
6093 }
6094 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6095 {
6096 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6097 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6098 {
6099 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6100 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6101 }
6102 else
6103 {
6104 LogEx(LOG_GROUP_IEM,
6105 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6106 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6107 return rcStrict2;
6108 }
6109 }
6110 else
6111 {
6112 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6113 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6114 return rcStrict;
6115 }
6116 }
6117 else
6118 {
6119 /*
6120 * No informational status codes here, much more straight forward.
6121 */
6122 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6123 if (RT_SUCCESS(rc))
6124 {
6125 Assert(rc == VINF_SUCCESS);
6126 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6127 if (RT_SUCCESS(rc))
6128 Assert(rc == VINF_SUCCESS);
6129 else
6130 {
6131 LogEx(LOG_GROUP_IEM,
6132 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6133 return rc;
6134 }
6135 }
6136 else
6137 {
6138 LogEx(LOG_GROUP_IEM,
6139 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6140 return rc;
6141 }
6142 }
6143 }
6144#ifdef VBOX_STRICT
6145 else
6146 memset(pbBuf, 0xcc, cbMem);
6147 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6148 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6149#endif
6150 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6151
6152 /*
6153 * Commit the bounce buffer entry.
6154 */
6155 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6156 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6157 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6158 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6159 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6160 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6161 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6162 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6163 pVCpu->iem.s.cActiveMappings++;
6164
6165 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6166 *ppvMem = pbBuf;
6167 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6168 return VINF_SUCCESS;
6169}
6170
6171
6172/**
6173 * iemMemMap woker that deals with iemMemPageMap failures.
6174 */
6175static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
6176 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6177{
6178 /*
6179 * Filter out conditions we can handle and the ones which shouldn't happen.
6180 */
6181 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6182 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6183 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6184 {
6185 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6186 return rcMap;
6187 }
6188 pVCpu->iem.s.cPotentialExits++;
6189
6190 /*
6191 * Read in the current memory content if it's a read, execute or partial
6192 * write access.
6193 */
6194 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6195 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6196 {
6197 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6198 memset(pbBuf, 0xff, cbMem);
6199 else
6200 {
6201 int rc;
6202 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6203 {
6204 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6205 if (rcStrict == VINF_SUCCESS)
6206 { /* nothing */ }
6207 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6208 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6209 else
6210 {
6211 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6212 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6213 return rcStrict;
6214 }
6215 }
6216 else
6217 {
6218 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6219 if (RT_SUCCESS(rc))
6220 { /* likely */ }
6221 else
6222 {
6223 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6224 GCPhysFirst, rc));
6225 return rc;
6226 }
6227 }
6228 }
6229 }
6230#ifdef VBOX_STRICT
6231 else
6232 memset(pbBuf, 0xcc, cbMem);
6233#endif
6234#ifdef VBOX_STRICT
6235 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6236 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6237#endif
6238
6239 /*
6240 * Commit the bounce buffer entry.
6241 */
6242 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6243 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6244 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6245 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6246 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6247 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6248 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6249 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6250 pVCpu->iem.s.cActiveMappings++;
6251
6252 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6253 *ppvMem = pbBuf;
6254 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6255 return VINF_SUCCESS;
6256}
6257
6258
6259
6260/**
6261 * Maps the specified guest memory for the given kind of access.
6262 *
6263 * This may be using bounce buffering of the memory if it's crossing a page
6264 * boundary or if there is an access handler installed for any of it. Because
6265 * of lock prefix guarantees, we're in for some extra clutter when this
6266 * happens.
6267 *
6268 * This may raise a \#GP, \#SS, \#PF or \#AC.
6269 *
6270 * @returns VBox strict status code.
6271 *
6272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6273 * @param ppvMem Where to return the pointer to the mapped memory.
6274 * @param pbUnmapInfo Where to return unmap info to be passed to
6275 * iemMemCommitAndUnmap or iemMemRollbackAndUnmap when
6276 * done.
6277 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6278 * 8, 12, 16, 32 or 512. When used by string operations
6279 * it can be up to a page.
6280 * @param iSegReg The index of the segment register to use for this
6281 * access. The base and limits are checked. Use UINT8_MAX
6282 * to indicate that no segmentation is required (for IDT,
6283 * GDT and LDT accesses).
6284 * @param GCPtrMem The address of the guest memory.
6285 * @param fAccess How the memory is being accessed. The
6286 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6287 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6288 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6289 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6290 * set.
6291 * @param uAlignCtl Alignment control:
6292 * - Bits 15:0 is the alignment mask.
6293 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6294 * IEM_MEMMAP_F_ALIGN_SSE, and
6295 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6296 * Pass zero to skip alignment.
6297 */
6298VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6299 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6300{
6301 /*
6302 * Check the input and figure out which mapping entry to use.
6303 */
6304 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6305 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6306 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6307 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
6308 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6309
6310 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6311 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6312 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6313 {
6314 iMemMap = iemMemMapFindFree(pVCpu);
6315 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6316 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6317 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6318 pVCpu->iem.s.aMemMappings[2].fAccess),
6319 VERR_IEM_IPE_9);
6320 }
6321
6322 /*
6323 * Map the memory, checking that we can actually access it. If something
6324 * slightly complicated happens, fall back on bounce buffering.
6325 */
6326 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6327 if (rcStrict == VINF_SUCCESS)
6328 { /* likely */ }
6329 else
6330 return rcStrict;
6331
6332 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6333 { /* likely */ }
6334 else
6335 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
6336
6337 /*
6338 * Alignment check.
6339 */
6340 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6341 { /* likelyish */ }
6342 else
6343 {
6344 /* Misaligned access. */
6345 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6346 {
6347 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6348 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6349 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6350 {
6351 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6352
6353 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6354 return iemRaiseAlignmentCheckException(pVCpu);
6355 }
6356 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6357 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6358 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6359 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6360 * that's what FXSAVE does on a 10980xe. */
6361 && iemMemAreAlignmentChecksEnabled(pVCpu))
6362 return iemRaiseAlignmentCheckException(pVCpu);
6363 else
6364 return iemRaiseGeneralProtectionFault0(pVCpu);
6365 }
6366
6367#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
6368 /* If the access is atomic there are host platform alignmnet restrictions
6369 we need to conform with. */
6370 if ( !(fAccess & IEM_ACCESS_ATOMIC)
6371# if defined(RT_ARCH_AMD64)
6372 || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
6373# elif defined(RT_ARCH_ARM64)
6374 || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
6375# else
6376# error port me
6377# endif
6378 )
6379 { /* okay */ }
6380 else
6381 {
6382 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
6383 pVCpu->iem.s.cMisalignedAtomics += 1;
6384 return VINF_EM_EMULATE_SPLIT_LOCK;
6385 }
6386#endif
6387 }
6388
6389#ifdef IEM_WITH_DATA_TLB
6390 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6391
6392 /*
6393 * Get the TLB entry for this page.
6394 */
6395 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6396 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6397 if (pTlbe->uTag == uTag)
6398 {
6399# ifdef VBOX_WITH_STATISTICS
6400 pVCpu->iem.s.DataTlb.cTlbHits++;
6401# endif
6402 }
6403 else
6404 {
6405 pVCpu->iem.s.DataTlb.cTlbMisses++;
6406 PGMPTWALK Walk;
6407 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6408 if (RT_FAILURE(rc))
6409 {
6410 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6411# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6412 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6413 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6414# endif
6415 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6416 }
6417
6418 Assert(Walk.fSucceeded);
6419 pTlbe->uTag = uTag;
6420 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6421 pTlbe->GCPhys = Walk.GCPhys;
6422 pTlbe->pbMappingR3 = NULL;
6423 }
6424
6425 /*
6426 * Check TLB page table level access flags.
6427 */
6428 /* If the page is either supervisor only or non-writable, we need to do
6429 more careful access checks. */
6430 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6431 {
6432 /* Write to read only memory? */
6433 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6434 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6435 && ( ( IEM_GET_CPL(pVCpu) == 3
6436 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6437 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6438 {
6439 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6440# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6441 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6442 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6443# endif
6444 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6445 }
6446
6447 /* Kernel memory accessed by userland? */
6448 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6449 && IEM_GET_CPL(pVCpu) == 3
6450 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6451 {
6452 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6453# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6454 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6455 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6456# endif
6457 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6458 }
6459 }
6460
6461 /*
6462 * Set the dirty / access flags.
6463 * ASSUMES this is set when the address is translated rather than on commit...
6464 */
6465 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6466 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6467 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6468 {
6469 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6470 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6471 AssertRC(rc2);
6472 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6473 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6474 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6475 }
6476
6477 /*
6478 * Look up the physical page info if necessary.
6479 */
6480 uint8_t *pbMem = NULL;
6481 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6482# ifdef IN_RING3
6483 pbMem = pTlbe->pbMappingR3;
6484# else
6485 pbMem = NULL;
6486# endif
6487 else
6488 {
6489 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6490 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6491 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6492 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6493 AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE == IEMTLBE_F_PG_CODE_PAGE);
6494 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6495 { /* likely */ }
6496 else
6497 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6498 pTlbe->pbMappingR3 = NULL;
6499 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6500 | IEMTLBE_F_NO_MAPPINGR3
6501 | IEMTLBE_F_PG_NO_READ
6502 | IEMTLBE_F_PG_NO_WRITE
6503 | IEMTLBE_F_PG_UNASSIGNED
6504 | IEMTLBE_F_PG_CODE_PAGE);
6505 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6506 &pbMem, &pTlbe->fFlagsAndPhysRev);
6507 AssertRCReturn(rc, rc);
6508# ifdef IN_RING3
6509 pTlbe->pbMappingR3 = pbMem;
6510# endif
6511 }
6512
6513 /*
6514 * Check the physical page level access and mapping.
6515 */
6516 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6517 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6518 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6519 { /* probably likely */ }
6520 else
6521 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem,
6522 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6523 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6524 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6525 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6526 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6527
6528 if (pbMem)
6529 {
6530 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6531 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6532 fAccess |= IEM_ACCESS_NOT_LOCKED;
6533 }
6534 else
6535 {
6536 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6537 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6538 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6539 if (rcStrict != VINF_SUCCESS)
6540 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6541 }
6542
6543 void * const pvMem = pbMem;
6544
6545 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6546 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6547 if (fAccess & IEM_ACCESS_TYPE_READ)
6548 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6549
6550#else /* !IEM_WITH_DATA_TLB */
6551
6552 RTGCPHYS GCPhysFirst;
6553 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6554 if (rcStrict != VINF_SUCCESS)
6555 return rcStrict;
6556
6557 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6558 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6559 if (fAccess & IEM_ACCESS_TYPE_READ)
6560 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6561
6562 void *pvMem;
6563 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6564 if (rcStrict != VINF_SUCCESS)
6565 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
6566
6567#endif /* !IEM_WITH_DATA_TLB */
6568
6569 /*
6570 * Fill in the mapping table entry.
6571 */
6572 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6573 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6574 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6575 pVCpu->iem.s.cActiveMappings += 1;
6576
6577 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6578 *ppvMem = pvMem;
6579 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
6580 AssertCompile(IEM_ACCESS_TYPE_MASK <= 0xf);
6581 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.aMemMappings) < 8);
6582
6583 return VINF_SUCCESS;
6584}
6585
6586
6587/**
6588 * Commits the guest memory if bounce buffered and unmaps it.
6589 *
6590 * @returns Strict VBox status code.
6591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6592 * @param bUnmapInfo Unmap info set by iemMemMap.
6593 */
6594VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
6595{
6596 uintptr_t const iMemMap = bUnmapInfo & 0x7;
6597 AssertMsgReturn( (bUnmapInfo & 0x08)
6598 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6599 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
6600 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
6601 VERR_NOT_FOUND);
6602
6603 /* If it's bounce buffered, we may need to write back the buffer. */
6604 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6605 {
6606 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6607 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6608 }
6609 /* Otherwise unlock it. */
6610 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6611 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6612
6613 /* Free the entry. */
6614 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6615 Assert(pVCpu->iem.s.cActiveMappings != 0);
6616 pVCpu->iem.s.cActiveMappings--;
6617 return VINF_SUCCESS;
6618}
6619
6620
/**
 * Rolls back the guest memory (conceptually only) and unmaps it.
 *
 * Used on failure paths where a mapped access must not be committed: any PGM
 * page mapping lock taken by iemMemMap is released, but bounce buffered data
 * is simply discarded (nothing is written back to guest memory).
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    /* bUnmapInfo encoding (see iemMemMap/iemMemMapJmp): bits 2:0 = mapping index,
       bit 3 = validity marker, bits 7:4 = access type (IEM_ACCESS_TYPE_MASK). */
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                        &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                           == ((unsigned)bUnmapInfo >> 4),
                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));

    /* Unlock it if necessary. */
    if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
}
6645
6646#ifdef IEM_WITH_SETJMP
6647
6648/**
6649 * Maps the specified guest memory for the given kind of access, longjmp on
6650 * error.
6651 *
6652 * This may be using bounce buffering of the memory if it's crossing a page
6653 * boundary or if there is an access handler installed for any of it. Because
6654 * of lock prefix guarantees, we're in for some extra clutter when this
6655 * happens.
6656 *
6657 * This may raise a \#GP, \#SS, \#PF or \#AC.
6658 *
6659 * @returns Pointer to the mapped memory.
6660 *
6661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6662 * @param bUnmapInfo Where to return unmap info to be passed to
6663 * iemMemCommitAndUnmapJmp, iemMemCommitAndUnmapRwSafeJmp,
6664 * iemMemCommitAndUnmapWoSafeJmp,
6665 * iemMemCommitAndUnmapRoSafeJmp,
6666 * iemMemRollbackAndUnmapWoSafe or iemMemRollbackAndUnmap
6667 * when done.
6668 * @param cbMem The number of bytes to map. This is usually 1,
6669 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6670 * string operations it can be up to a page.
6671 * @param iSegReg The index of the segment register to use for
6672 * this access. The base and limits are checked.
6673 * Use UINT8_MAX to indicate that no segmentation
6674 * is required (for IDT, GDT and LDT accesses).
6675 * @param GCPtrMem The address of the guest memory.
6676 * @param fAccess How the memory is being accessed. The
6677 * IEM_ACCESS_TYPE_XXX part is used to figure out how to
6678 * map the memory, while the IEM_ACCESS_WHAT_XXX part is
6679 * used when raising exceptions. The IEM_ACCESS_ATOMIC and
6680 * IEM_ACCESS_PARTIAL_WRITE bits are also allowed to be
6681 * set.
6682 * @param uAlignCtl Alignment control:
6683 * - Bits 15:0 is the alignment mask.
6684 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6685 * IEM_MEMMAP_F_ALIGN_SSE, and
6686 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6687 * Pass zero to skip alignment.
6688 */
void *iemMemMapJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
                   uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
{
    /*
     * Check the input, check segment access and adjust address
     * with segment base.
     */
    Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_ATOMIC | IEM_ACCESS_PARTIAL_WRITE)));
    Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));

    VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
    if (rcStrict == VINF_SUCCESS) { /*likely*/ }
    else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));

    /*
     * Alignment check.
     */
    if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
    { /* likelyish */ }
    else
    {
        /* Misaligned access. */
        if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
        {
            /* Decide between #AC and #GP per the uAlignCtl flags; the SSE case
               honours MXCSR.MM (misaligned exception mask). */
            if (   !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
                || (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
                    && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
            {
                AssertCompile(X86_CR0_AM == X86_EFL_AC);

                if (iemMemAreAlignmentChecksEnabled(pVCpu))
                    iemRaiseAlignmentCheckExceptionJmp(pVCpu);
            }
            else if (   (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
                     && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
                     /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
                      * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++.  Using 4 for now as
                      * that's what FXSAVE does on a 10980xe. */
                     && iemMemAreAlignmentChecksEnabled(pVCpu))
                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
            else
                iemRaiseGeneralProtectionFault0Jmp(pVCpu);
        }

#if (defined(RT_ARCH_AMD64) && defined(RT_OS_LINUX)) || defined(RT_ARCH_ARM64)
        /* If the access is atomic there are host platform alignment restrictions
           we need to conform with. */
        if (   !(fAccess & IEM_ACCESS_ATOMIC)
# if defined(RT_ARCH_AMD64)
            || (64U - (GCPtrMem & 63U) >= cbMem) /* split-lock detection. ASSUMES 64 byte cache line. */
# elif defined(RT_ARCH_ARM64)
            || (16U - (GCPtrMem & 15U) >= cbMem) /* LSE2 allows atomics anywhere within a 16 byte sized & aligned block. */
# else
#  error port me
# endif
           )
        { /* okay */ }
        else
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv LB %u - misaligned atomic fallback.\n", GCPtrMem, cbMem));
            pVCpu->iem.s.cMisalignedAtomics += 1;
            IEM_DO_LONGJMP(pVCpu, VINF_EM_EMULATE_SPLIT_LOCK);
        }
#endif
    }

    /*
     * Figure out which mapping entry to use.
     */
    unsigned iMemMap = pVCpu->iem.s.iNextMapping;
    if (   iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
        || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
    {
        iMemMap = iemMemMapFindFree(pVCpu);
        AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
                            ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
                             pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
                             pVCpu->iem.s.aMemMappings[2].fAccess),
                            IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
    }

    /*
     * Crossing a page boundary?
     */
    if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
    { /* No (likely). */ }
    else
    {
        /* Cross-page accesses are always bounce buffered (lock prefix guarantees). */
        void *pvMem;
        rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPtrMem, fAccess);
        if (rcStrict == VINF_SUCCESS)
            return pvMem;
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    }

#ifdef IEM_WITH_DATA_TLB
    Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));

    /*
     * Get the TLB entry for this page.
     */
    uint64_t const     uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrMem);
    PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
    if (pTlbe->uTag == uTag)
        STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
    else
    {
        pVCpu->iem.s.DataTlb.cTlbMisses++;
        PGMPTWALK Walk;
        int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
        if (RT_FAILURE(rc))
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            if (Walk.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
# endif
            iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
        }

        Assert(Walk.fSucceeded);
        pTlbe->uTag             = uTag;
        pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
        pTlbe->GCPhys           = Walk.GCPhys;
        pTlbe->pbMappingR3      = NULL;
    }

    /*
     * Check the flags and physical revision.
     */
    /** @todo make the caller pass these in with fAccess. */
    uint64_t const fNoUser          = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
                                    ? IEMTLBE_F_PT_NO_USER : 0;
    uint64_t const fNoWriteNoDirty  = fAccess & IEM_ACCESS_TYPE_WRITE
                                    ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
                                      | (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
                                         || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
                                         ? IEMTLBE_F_PT_NO_WRITE : 0)
                                    : 0;
    uint64_t const fNoRead          = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
    uint8_t       *pbMem            = NULL;
    if (   (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
        == pVCpu->iem.s.DataTlb.uTlbPhysRev)
# ifdef IN_RING3
        pbMem = pTlbe->pbMappingR3;
# else
        pbMem = NULL;
# endif
    else
    {
        /*
         * Okay, something isn't quite right or needs refreshing.
         */
        /* Write to read only memory? */
        if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            /* NOTE(review): 'Walk' is declared in the TLB-miss branch above and appears to be
               out of scope here - verify this path builds/behaves correctly when
               VBOX_WITH_NESTED_HWVIRT_VMX_EPT and IEM_WITH_DATA_TLB are both defined. */
            if (Walk.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
# endif
            iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
        }

        /* Kernel memory accessed by userland? */
        if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
        {
            LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            /* NOTE(review): same 'Walk' scope concern as in the read-only branch above. */
            if (Walk.fFailed & PGM_WALKFAIL_EPT)
                IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
# endif
            iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
        }

        /* Set the dirty / access flags.
           ASSUMES this is set when the address is translated rather than on commit... */
        /** @todo testcase: check when A and D bits are actually set by the CPU. */
        if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
        {
            uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
            int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
            AssertRC(rc2);
            /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
            Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
            pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
        }

        /*
         * Check if the physical page info needs updating.
         */
        if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
# ifdef IN_RING3
            pbMem = pTlbe->pbMappingR3;
# else
            pbMem = NULL;
# endif
        else
        {
            AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE     == IEMTLBE_F_PG_NO_WRITE);
            AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ      == IEMTLBE_F_PG_NO_READ);
            AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
            AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED   == IEMTLBE_F_PG_UNASSIGNED);
            AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE    == IEMTLBE_F_PG_CODE_PAGE);
            pTlbe->pbMappingR3       = NULL;
            pTlbe->fFlagsAndPhysRev &= ~(  IEMTLBE_F_PHYS_REV
                                         | IEMTLBE_F_NO_MAPPINGR3
                                         | IEMTLBE_F_PG_NO_READ
                                         | IEMTLBE_F_PG_NO_WRITE
                                         | IEMTLBE_F_PG_UNASSIGNED
                                         | IEMTLBE_F_PG_CODE_PAGE);
            int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
                                                &pbMem, &pTlbe->fFlagsAndPhysRev);
            AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
# ifdef IN_RING3
            pTlbe->pbMappingR3 = pbMem;
# endif
        }

        /*
         * Check the physical page level access and mapping.
         */
        if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
        { /* probably likely */ }
        else
        {
            /* Access handlers / MMIO / unassigned pages go through the bounce buffer. */
            rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, pbUnmapInfo, cbMem,
                                                 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
                                                   pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
                                                 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ    ? VERR_PGM_PHYS_TLB_CATCH_ALL
                                                                                                     : VERR_PGM_PHYS_TLB_CATCH_WRITE);
            if (rcStrict == VINF_SUCCESS)
                return pbMem;
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }
    Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */

    if (pbMem)
    {
        /* Direct ring-3 mapping available - no page lock needed. */
        Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
        pbMem    = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
        fAccess |= IEM_ACCESS_NOT_LOCKED;
    }
    else
    {
        /* No cached mapping - map the page and hold a PGM page mapping lock. */
        Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
        RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
        rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
        if (rcStrict == VINF_SUCCESS)
        {
            /* Unmap info: index | validity bit | access type in bits 7:4. */
            *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
            return pbMem;
        }
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    }

    void * const pvMem = pbMem;

    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
    if (fAccess & IEM_ACCESS_TYPE_READ)
        Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));

#else  /* !IEM_WITH_DATA_TLB */


    RTGCPHYS GCPhysFirst;
    rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
    if (rcStrict == VINF_SUCCESS) { /*likely*/ }
    else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));

    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
    if (fAccess & IEM_ACCESS_TYPE_READ)
        Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));

    void *pvMem;
    rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    if (rcStrict == VINF_SUCCESS)
    { /* likely */ }
    else
    {
        rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, pbUnmapInfo, cbMem, GCPhysFirst, fAccess, rcStrict);
        if (rcStrict == VINF_SUCCESS)
            return pvMem;
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    }

#endif /* !IEM_WITH_DATA_TLB */

    /*
     * Fill in the mapping table entry.
     */
    pVCpu->iem.s.aMemMappings[iMemMap].pv      = pvMem;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

    iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);

    /* Unmap info: index | validity bit | access type in bits 7:4. */
    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return pvMem;
}
6994
6995
/**
 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMapJmp.
 */
void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    /* bUnmapInfo encoding: bits 2:0 = mapping index, bit 3 = validity marker,
       bits 7:4 = access type (IEM_ACCESS_TYPE_MASK). */
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                        &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                           == ((unsigned)bUnmapInfo >> 4),
                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
        {
            VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
            if (rcStrict == VINF_SUCCESS)
                return;
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
}
7032
7033
7034/** Fallback for iemMemCommitAndUnmapRwJmp. */
7035void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7036{
7037 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7038 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7039}
7040
7041
7042/** Fallback for iemMemCommitAndUnmapAtJmp. */
7043void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7044{
7045 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
7046 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7047}
7048
7049
7050/** Fallback for iemMemCommitAndUnmapWoJmp. */
7051void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7052{
7053 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7054 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7055}
7056
7057
7058/** Fallback for iemMemCommitAndUnmapRoJmp. */
7059void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
7060{
7061 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
7062 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
7063}
7064
7065
7066/** Fallback for iemMemRollbackAndUnmapWo. */
7067void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
7068{
7069 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
7070 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
7071}
7072
7073#endif /* IEM_WITH_SETJMP */
7074
7075#ifndef IN_RING3
/**
 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
 *
 * Allows the instruction to be completed and retired, while the IEM user will
 * return to ring-3 immediately afterwards and do the postponed writes there.
 *
 * @returns VBox status code (no strict statuses).  Caller must check
 *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    /* bUnmapInfo encoding: bits 2:0 = mapping index, bit 3 = validity marker,
       bits 7:4 = access type (IEM_ACCESS_TYPE_MASK). */
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturn(   (bUnmapInfo & 0x08)
                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                    &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                       == ((unsigned)bUnmapInfo >> 4),
                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
                    VERR_NOT_FOUND);

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
            return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}
7115#endif
7116
7117
7118/**
7119 * Rollbacks mappings, releasing page locks and such.
7120 *
7121 * The caller shall only call this after checking cActiveMappings.
7122 *
7123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7124 */
7125void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
7126{
7127 Assert(pVCpu->iem.s.cActiveMappings > 0);
7128
7129 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
7130 while (iMemMap-- > 0)
7131 {
7132 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
7133 if (fAccess != IEM_ACCESS_INVALID)
7134 {
7135 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
7136 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7137 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
7138 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
7139 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
7140 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
7141 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
7142 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
7143 pVCpu->iem.s.cActiveMappings--;
7144 }
7145 }
7146}
7147
7148
/*
 * Instantiate R/W templates.
 *
 * Each inclusion of IEMAllMemRWTmpl.cpp.h generates memory accessors (e.g. the
 * iemMemFetchDataU16/U32/U64 helpers used below) for the type described by the
 * TMPL_MEM_* macros defined immediately before it.  The template header is
 * presumably expected to #undef those parameter macros again - note how
 * TMPL_MEM_TYPE is re-#defined for every inclusion without an explicit #undef
 * here.  While TMPL_MEM_WITH_STACK is defined, stack (push/pop) variants are
 * included as well.
 */
#define TMPL_MEM_WITH_STACK

#define TMPL_MEM_TYPE       uint8_t
#define TMPL_MEM_FN_SUFF    U8
#define TMPL_MEM_FMT_TYPE   "%#04x"
#define TMPL_MEM_FMT_DESC   "byte"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_MEM_TYPE       uint16_t
#define TMPL_MEM_FN_SUFF    U16
#define TMPL_MEM_FMT_TYPE   "%#06x"
#define TMPL_MEM_FMT_DESC   "word"
#include "IEMAllMemRWTmpl.cpp.h"

/* 32-bit also gets the segment-register push variant (TMPL_WITH_PUSH_SREG). */
#define TMPL_WITH_PUSH_SREG
#define TMPL_MEM_TYPE       uint32_t
#define TMPL_MEM_FN_SUFF    U32
#define TMPL_MEM_FMT_TYPE   "%#010x"
#define TMPL_MEM_FMT_DESC   "dword"
#include "IEMAllMemRWTmpl.cpp.h"
#undef TMPL_WITH_PUSH_SREG

#define TMPL_MEM_TYPE       uint64_t
#define TMPL_MEM_FN_SUFF    U64
#define TMPL_MEM_FMT_TYPE   "%#018RX64"
#define TMPL_MEM_FMT_DESC   "qword"
#include "IEMAllMemRWTmpl.cpp.h"

#undef TMPL_MEM_WITH_STACK

/* 64-bit value with a 128-bit alignment requirement (alignment mask 15). */
#define TMPL_MEM_TYPE       uint64_t
#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
#define TMPL_MEM_FN_SUFF    U64AlignedU128
#define TMPL_MEM_FMT_TYPE   "%#018RX64"
#define TMPL_MEM_FMT_DESC   "qword"
#include "IEMAllMemRWTmpl.cpp.h"

/* See IEMAllMemRWTmplInline.cpp.h */
#define TMPL_MEM_BY_REF

#define TMPL_MEM_TYPE       RTFLOAT80U
#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1)
#define TMPL_MEM_FN_SUFF    R80
#define TMPL_MEM_FMT_TYPE   "%.10Rhxs"
#define TMPL_MEM_FMT_DESC   "tword"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_MEM_TYPE       RTPBCD80U
#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) - 1) /** @todo testcase: 80-bit BCD alignment */
#define TMPL_MEM_FN_SUFF    D80
#define TMPL_MEM_FMT_TYPE   "%.10Rhxs"
#define TMPL_MEM_FMT_DESC   "tword"
#include "IEMAllMemRWTmpl.cpp.h"

#define TMPL_MEM_TYPE       RTUINT128U
#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
#define TMPL_MEM_FN_SUFF    U128
#define TMPL_MEM_FMT_TYPE   "%.16Rhxs"
#define TMPL_MEM_FMT_DESC   "dqword"
#include "IEMAllMemRWTmpl.cpp.h"

/* 128-bit with SSE alignment semantics (#GP or #AC per IEM_MEMMAP_F_ALIGN_SSE). */
#define TMPL_MEM_TYPE       RTUINT128U
#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
#define TMPL_MEM_FN_SUFF    U128AlignedSse
#define TMPL_MEM_FMT_TYPE   "%.16Rhxs"
#define TMPL_MEM_FMT_DESC   "dqword"
#include "IEMAllMemRWTmpl.cpp.h"

/* Unaligned (no #AC) 128-bit variant. */
#define TMPL_MEM_TYPE       RTUINT128U
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_FN_SUFF    U128NoAc
#define TMPL_MEM_FMT_TYPE   "%.16Rhxs"
#define TMPL_MEM_FMT_DESC   "dqword"
#include "IEMAllMemRWTmpl.cpp.h"

/* Unaligned (no #AC) 256-bit variant. */
#define TMPL_MEM_TYPE       RTUINT256U
#define TMPL_MEM_TYPE_ALIGN 0
#define TMPL_MEM_FN_SUFF    U256NoAc
#define TMPL_MEM_FMT_TYPE   "%.32Rhxs"
#define TMPL_MEM_FMT_DESC   "qqword"
#include "IEMAllMemRWTmpl.cpp.h"

/* 256-bit with AVX alignment semantics (#GP on misalignment). */
#define TMPL_MEM_TYPE       RTUINT256U
#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT256U) - 1)
#define TMPL_MEM_MAP_FLAGS_ADD IEM_MEMMAP_F_ALIGN_GP
#define TMPL_MEM_FN_SUFF    U256AlignedAvx
#define TMPL_MEM_FMT_TYPE   "%.32Rhxs"
#define TMPL_MEM_FMT_DESC   "qqword"
#include "IEMAllMemRWTmpl.cpp.h"
7242
7243/**
7244 * Fetches a data dword and zero extends it to a qword.
7245 *
7246 * @returns Strict VBox status code.
7247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7248 * @param pu64Dst Where to return the qword.
7249 * @param iSegReg The index of the segment register to use for
7250 * this access. The base and limits are checked.
7251 * @param GCPtrMem The address of the guest memory.
7252 */
7253VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7254{
7255 /* The lazy approach for now... */
7256 uint8_t bUnmapInfo;
7257 uint32_t const *pu32Src;
7258 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem,
7259 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7260 if (rc == VINF_SUCCESS)
7261 {
7262 *pu64Dst = *pu32Src;
7263 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7264 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7265 }
7266 return rc;
7267}
7268
7269
#ifdef SOME_UNUSED_FUNCTION
/**
 * Fetches a data dword and sign extends it to a qword.
 *
 * Note: only compiled when SOME_UNUSED_FUNCTION is defined.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pu64Dst     Where to return the sign extended value.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t bUnmapInfo;
    int32_t const *pi32Src;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, &bUnmapInfo, sizeof(*pi32Src), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = *pi32Src; /* sign extension is implicit: int32_t -> uint64_t */
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
    }
#ifdef __GNUC__ /* warning: GCC may be a royal pain */
    else
        *pu64Dst = 0;
#endif
    return rc;
}
#endif
7301
7302
/**
 * Fetches a descriptor register (lgdt, lidt).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   pcbLimit    Where to return the limit.
 * @param   pGCPtrBase  Where to return the base.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   enmOpSize   The effective operand size.
 */
VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
                                 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
{
    /*
     * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
     * little special:
     *      - The two reads are done separately.
     *      - Operand size override works in 16-bit and 32-bit code, but not in
     *        64-bit code.
     *      - We suspect the 386 to actually commit the limit before the base in
     *        some cases (search for 386 in  bs3CpuBasic2_lidt_lgdt_One).  We
     *        don't try emulate this eccentric behavior, because it's not well
     *        enough understood and rather hard to trigger.
     *      - The 486 seems to do a dword limit read when the operand size is 32-bit.
     */
    VBOXSTRICTRC rcStrict;
    if (IEM_IS_64BIT_CODE(pVCpu))
    {
        /* 64-bit mode: 16-bit limit followed by the full 64-bit base. */
        rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
    }
    else
    {
        uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
        if (enmOpSize == IEMMODE_32BIT)
        {
            if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
            {
                rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
                if (rcStrict == VINF_SUCCESS)
                    rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
            }
            else
            {
                /* 486 quirk: dword limit read; only the low word is kept. */
                rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
                if (rcStrict == VINF_SUCCESS)
                {
                    *pcbLimit = (uint16_t)uTmp;
                    rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
                }
            }
            if (rcStrict == VINF_SUCCESS)
                *pGCPtrBase = uTmp;
        }
        else
        {
            /* 16-bit operand size: only the low 24 bits of the base are used. */
            rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
            if (rcStrict == VINF_SUCCESS)
            {
                rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
                if (rcStrict == VINF_SUCCESS)
                    *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
            }
        }
    }
    return rcStrict;
}
7372
7373
7374/**
7375 * Stores a data dqword, SSE aligned.
7376 *
7377 * @returns Strict VBox status code.
7378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7379 * @param iSegReg The index of the segment register to use for
7380 * this access. The base and limits are checked.
7381 * @param GCPtrMem The address of the guest memory.
7382 * @param u128Value The value to store.
7383 */
7384VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7385{
7386 /* The lazy approach for now... */
7387 uint8_t bUnmapInfo;
7388 PRTUINT128U pu128Dst;
7389 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7390 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7391 if (rc == VINF_SUCCESS)
7392 {
7393 pu128Dst->au64[0] = u128Value.au64[0];
7394 pu128Dst->au64[1] = u128Value.au64[1];
7395 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7396 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7397 }
7398 return rc;
7399}
7400
7401
7402#ifdef IEM_WITH_SETJMP
/**
 * Stores a data dqword, SSE aligned, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   u128Value   The value to store.
 */
void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
                                      RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
{
    /* The lazy approach for now... */
    uint8_t bUnmapInfo;
    PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
                                                     (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
    pu128Dst->au64[0] = u128Value.au64[0];
    pu128Dst->au64[1] = u128Value.au64[1];
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
}
7425#endif
7426
7427
/**
 * Stores a data qqword (256 bits), no alignment check (NO_AC variant).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg             The index of the segment register to use for
 *                              this access.  The base and limits are checked.
 * @param   GCPtrMem            The address of the guest memory.
 * @param   pu256Value          Pointer to the value to store.
 */
VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
{
    /* The lazy approach for now... */
    uint8_t bUnmapInfo;
    PRTUINT256U pu256Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
    if (rc == VINF_SUCCESS)
    {
        pu256Dst->au64[0] = pu256Value->au64[0];
        pu256Dst->au64[1] = pu256Value->au64[1];
        pu256Dst->au64[2] = pu256Value->au64[2];
        pu256Dst->au64[3] = pu256Value->au64[3];
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
        Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    }
    return rc;
}
7456
7457
7458#ifdef IEM_WITH_SETJMP
/**
 * Stores a data qqword (256 bits), no alignment check, longjmp on error.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg             The index of the segment register to use for
 *                              this access.  The base and limits are checked.
 * @param   GCPtrMem            The address of the guest memory.
 * @param   pu256Value          Pointer to the value to store.
 */
void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
{
    /* The lazy approach for now... */
    uint8_t bUnmapInfo;
    PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu256Dst), iSegReg, GCPtrMem,
                                                     IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
    pu256Dst->au64[0] = pu256Value->au64[0];
    pu256Dst->au64[1] = pu256Value->au64[1];
    pu256Dst->au64[2] = pu256Value->au64[2];
    pu256Dst->au64[3] = pu256Value->au64[3];
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
}
7481#endif
7482
7483
/**
 * Stores a descriptor register (sgdt, sidt).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   cbLimit             The limit.
 * @param   GCPtrBase           The base address.
 * @param   iSegReg             The index of the segment register to use for
 *                              this access.  The base and limits are checked.
 * @param   GCPtrMem            The address of the guest memory.
 */
VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
{
    /*
     * The SIDT and SGDT instructions actually store the data using two
     * independent writes (see bs3CpuBasic2_sidt_sgdt_One).  The instructions
     * do not respond to opsize prefixes.
     */
    VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
    if (rcStrict == VINF_SUCCESS)
    {
        /* 16-bit code: store 24-bit base; emulated 286 fills the unused top
           byte with 0xff (as the real 286 did). */
        if (IEM_IS_16BIT_CODE(pVCpu))
            rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
                                          IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
                                          ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
        else if (IEM_IS_32BIT_CODE(pVCpu))
            rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
        else
            rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
    }
    return rcStrict;
}
7516
7517
7518/**
7519 * Begin a special stack push (used by interrupt, exceptions and such).
7520 *
7521 * This will raise \#SS or \#PF if appropriate.
7522 *
7523 * @returns Strict VBox status code.
7524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7525 * @param cbMem The number of bytes to push onto the stack.
7526 * @param cbAlign The alignment mask (7, 3, 1).
7527 * @param ppvMem Where to return the pointer to the stack memory.
7528 * As with the other memory functions this could be
7529 * direct access or bounce buffered access, so
7530 * don't commit register until the commit call
7531 * succeeds.
7532 * @param pbUnmapInfo Where to store unmap info for
7533 * iemMemStackPushCommitSpecial.
7534 * @param puNewRsp Where to return the new RSP value. This must be
7535 * passed unchanged to
7536 * iemMemStackPushCommitSpecial().
7537 */
7538VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7539 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7540{
7541 Assert(cbMem < UINT8_MAX);
7542 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7543 return iemMemMap(pVCpu, ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W, cbAlign);
7544}
7545
7546
7547/**
7548 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7549 *
7550 * This will update the rSP.
7551 *
7552 * @returns Strict VBox status code.
7553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7554 * @param bUnmapInfo Unmap info set by iemMemStackPushBeginSpecial.
7555 * @param uNewRsp The new RSP value returned by
7556 * iemMemStackPushBeginSpecial().
7557 */
7558VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT
7559{
7560 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7561 if (rcStrict == VINF_SUCCESS)
7562 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7563 return rcStrict;
7564}
7565
7566
7567/**
7568 * Begin a special stack pop (used by iret, retf and such).
7569 *
7570 * This will raise \#SS or \#PF if appropriate.
7571 *
7572 * @returns Strict VBox status code.
7573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7574 * @param cbMem The number of bytes to pop from the stack.
7575 * @param cbAlign The alignment mask (7, 3, 1).
7576 * @param ppvMem Where to return the pointer to the stack memory.
7577 * @param pbUnmapInfo Where to store unmap info for
7578 * iemMemStackPopDoneSpecial.
7579 * @param puNewRsp Where to return the new RSP value. This must be
7580 * assigned to CPUMCTX::rsp manually some time
7581 * after iemMemStackPopDoneSpecial() has been
7582 * called.
7583 */
7584VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7585 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT
7586{
7587 Assert(cbMem < UINT8_MAX);
7588 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7589 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7590}
7591
7592
7593/**
7594 * Continue a special stack pop (used by iret and retf), for the purpose of
7595 * retrieving a new stack pointer.
7596 *
7597 * This will raise \#SS or \#PF if appropriate.
7598 *
7599 * @returns Strict VBox status code.
7600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7601 * @param off Offset from the top of the stack. This is zero
7602 * except in the retf case.
7603 * @param cbMem The number of bytes to pop from the stack.
7604 * @param ppvMem Where to return the pointer to the stack memory.
7605 * @param pbUnmapInfo Where to store unmap info for
7606 * iemMemStackPopDoneSpecial.
7607 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7608 * return this because all use of this function is
7609 * to retrieve a new value and anything we return
7610 * here would be discarded.)
7611 */
7612VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7613 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT
7614{
7615 Assert(cbMem < UINT8_MAX);
7616
7617 /* The essense of iemRegGetRspForPopEx and friends: */ /** @todo put this into a inlined function? */
7618 RTGCPTR GCPtrTop;
7619 if (IEM_IS_64BIT_CODE(pVCpu))
7620 GCPtrTop = uCurNewRsp;
7621 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7622 GCPtrTop = (uint32_t)uCurNewRsp;
7623 else
7624 GCPtrTop = (uint16_t)uCurNewRsp;
7625
7626 return iemMemMap(pVCpu, (void **)ppvMem, pbUnmapInfo, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7627 0 /* checked in iemMemStackPopBeginSpecial */);
7628}
7629
7630
/**
 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
 * iemMemStackPopContinueSpecial).
 *
 * The caller will manually commit the rSP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo          Unmap information returned by
 *                              iemMemStackPopBeginSpecial() or
 *                              iemMemStackPopContinueSpecial().
 */
VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    /* Nothing to copy out for a pop; just release the mapping. */
    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
}
7647
7648
7649/**
7650 * Fetches a system table byte.
7651 *
7652 * @returns Strict VBox status code.
7653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7654 * @param pbDst Where to return the byte.
7655 * @param iSegReg The index of the segment register to use for
7656 * this access. The base and limits are checked.
7657 * @param GCPtrMem The address of the guest memory.
7658 */
7659VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7660{
7661 /* The lazy approach for now... */
7662 uint8_t bUnmapInfo;
7663 uint8_t const *pbSrc;
7664 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, &bUnmapInfo, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7665 if (rc == VINF_SUCCESS)
7666 {
7667 *pbDst = *pbSrc;
7668 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7669 }
7670 return rc;
7671}
7672
7673
7674/**
7675 * Fetches a system table word.
7676 *
7677 * @returns Strict VBox status code.
7678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7679 * @param pu16Dst Where to return the word.
7680 * @param iSegReg The index of the segment register to use for
7681 * this access. The base and limits are checked.
7682 * @param GCPtrMem The address of the guest memory.
7683 */
7684VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7685{
7686 /* The lazy approach for now... */
7687 uint8_t bUnmapInfo;
7688 uint16_t const *pu16Src;
7689 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, &bUnmapInfo, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7690 if (rc == VINF_SUCCESS)
7691 {
7692 *pu16Dst = *pu16Src;
7693 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7694 }
7695 return rc;
7696}
7697
7698
7699/**
7700 * Fetches a system table dword.
7701 *
7702 * @returns Strict VBox status code.
7703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7704 * @param pu32Dst Where to return the dword.
7705 * @param iSegReg The index of the segment register to use for
7706 * this access. The base and limits are checked.
7707 * @param GCPtrMem The address of the guest memory.
7708 */
7709VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7710{
7711 /* The lazy approach for now... */
7712 uint8_t bUnmapInfo;
7713 uint32_t const *pu32Src;
7714 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, &bUnmapInfo, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7715 if (rc == VINF_SUCCESS)
7716 {
7717 *pu32Dst = *pu32Src;
7718 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7719 }
7720 return rc;
7721}
7722
7723
7724/**
7725 * Fetches a system table qword.
7726 *
7727 * @returns Strict VBox status code.
7728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7729 * @param pu64Dst Where to return the qword.
7730 * @param iSegReg The index of the segment register to use for
7731 * this access. The base and limits are checked.
7732 * @param GCPtrMem The address of the guest memory.
7733 */
7734VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7735{
7736 /* The lazy approach for now... */
7737 uint8_t bUnmapInfo;
7738 uint64_t const *pu64Src;
7739 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, &bUnmapInfo, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7740 if (rc == VINF_SUCCESS)
7741 {
7742 *pu64Dst = *pu64Src;
7743 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7744 }
7745 return rc;
7746}
7747
7748
/**
 * Fetches a descriptor table entry with caller specified error code.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   pDesc               Where to return the descriptor table entry.
 * @param   uSel                The selector which table entry to fetch.
 * @param   uXcpt               The exception to raise on table lookup error.
 * @param   uErrorCode          The error code associated with the exception.
 */
static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
                                              uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
{
    AssertPtr(pDesc);
    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);

    /** @todo did the 286 require all 8 bytes to be accessible? */
    /*
     * Get the selector table base and check bounds.
     *
     * Note! (uSel | X86_SEL_RPL_LDT) sets the low three bits, giving the
     *       offset of the last byte of the 8-byte descriptor entry, so the
     *       limit comparison covers the whole entry.
     */
    RTGCPTR GCPtrBase;
    if (uSel & X86_SEL_LDT)
    {
        if (   !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
            || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
        {
            LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
                                  uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
            return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                     uErrorCode, 0);
        }

        Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
        GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
    }
    else
    {
        if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
        {
            LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
            return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
                                     uErrorCode, 0);
        }
        GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
    }

    /*
     * Read the legacy descriptor and maybe the long mode extensions if
     * required.
     */
    VBOXSTRICTRC rcStrict;
    if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
        rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
    else
    {
        /* 286 target: fetch the three used words individually and zero the
           reserved fourth word. */
        rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
        if (rcStrict == VINF_SUCCESS)
            rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
        if (rcStrict == VINF_SUCCESS)
            pDesc->Legacy.au16[3] = 0;
        else
            return rcStrict;
    }

    if (rcStrict == VINF_SUCCESS)
    {
        /* In long mode, system descriptors occupy 16 bytes; fetch the high
           half unless this is a code/data descriptor (u1DescType set).
           (uSel | X86_SEL_RPL_LDT) + 1 rounds up to the start of the next
           8-byte entry, i.e. (uSel & X86_SEL_MASK) + 8. */
        if (   !IEM_IS_LONG_MODE(pVCpu)
            || pDesc->Legacy.Gen.u1DescType)
            pDesc->Long.au64[1] = 0;
        else if (    (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
                  <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
            rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
        else
        {
            LogEx(LOG_GROUP_IEM,("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
            /** @todo is this the right exception? */
            return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
        }
    }
    return rcStrict;
}
7832
7833
/**
 * Fetches a descriptor table entry.
 *
 * Convenience wrapper around iemMemFetchSelDescWithErr, using the selector
 * value (sans RPL/TI bits) as the exception error code.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   pDesc               Where to return the descriptor table entry.
 * @param   uSel                The selector which table entry to fetch.
 * @param   uXcpt               The exception to raise on table lookup error.
 */
VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
{
    return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
}
7847
7848
/**
 * Marks the selector descriptor as accessed (only non-system descriptors).
 *
 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
 * will therefore skip the limit checks.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   uSel                The selector.
 */
VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
{
    /*
     * Get the selector table base and calculate the entry address.
     */
    RTGCPTR GCPtr = uSel & X86_SEL_LDT
                  ? pVCpu->cpum.GstCtx.ldtr.u64Base
                  : pVCpu->cpum.GstCtx.gdtr.pGdt;
    GCPtr += uSel & X86_SEL_MASK;

    /*
     * ASMAtomicBitSet will assert if the address is misaligned, so do some
     * ugly stuff to avoid this.  This will make sure it's an atomic access
     * as well more or less remove any question about 8-bit or 32-bit accesses.
     */
    VBOXSTRICTRC rcStrict;
    uint8_t bUnmapInfo;
    uint32_t volatile *pu32;
    if ((GCPtr & 3) == 0)
    {
        /* The normal case, map the 32-bit bits around the accessed bit (40). */
        GCPtr += 2 + 2;
        rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceeded by u8BaseHigh1. */
    }
    else
    {
        /* The misaligned GDT/LDT case, map the whole thing. */
        rcStrict = iemMemMap(pVCpu, (void **)&pu32, &bUnmapInfo, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        /* Pick the byte offset + bit number that still lands on descriptor
           bit 40 (the accessed bit) while keeping the bit-set target aligned. */
        switch ((uintptr_t)pu32 & 3)
        {
            case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
            case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
            case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
            case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
        }
    }

    return iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
}
7903
7904
7905#undef LOG_GROUP
7906#define LOG_GROUP LOG_GROUP_IEM
7907
7908/** @} */
7909
7910/** @name Opcode Helpers.
7911 * @{
7912 */
7913
/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bRm                 The ModRM byte.
 * @param   cbImmAndRspOffset   - First byte: The size of any immediate
 *                                following the effective address opcode bytes
 *                                (only for RIP relative addressing).
 *                              - Second byte: RSP displacement (for POP [ESP]).
 * @param   pGCPtrEff           Where to return the effective address.
 */
VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
/* Helper: default to the SS segment when no segment override prefix is active
   (used for BP/SP based addressing forms). */
# define SET_SS_DEF() \
    do \
    { \
        if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
            pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
    } while (0)

    if (!IEM_IS_64BIT_CODE(pVCpu))
    {
/** @todo Check the effective address size crap! */
        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
        {
            uint16_t u16EffAddr;

            /* Handle the disp16 form with no registers first. */
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
                IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
            else
            {
                /* Get the displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:  u16EffAddr = 0;                             break;
                    case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
                    case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
                    default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
                }

                /* Add the base and index registers to the disp. */
                switch (bRm & X86_MODRM_RM_MASK)
                {
                    case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
                    case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
                    case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
                    case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
                    case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
                    case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
                    case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
                    case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
                }
            }

            *pGCPtrEff = u16EffAddr;
        }
        else
        {
            Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
            uint32_t u32EffAddr;

            /* Handle the disp32 form with no registers first. */
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
                IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
            else
            {
                /* Get the register (or SIB) value. */
                switch ((bRm & X86_MODRM_RM_MASK))
                {
                    case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                    case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                    case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                    case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                    case 4: /* SIB */
                    {
                        uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);

                        /* Get the index and scale it. */
                        switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
                        {
                            case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                            case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                            case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                            case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                            case 4: u32EffAddr = 0; /*none */ break;
                            case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
                            case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                            case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
                            IEM_NOT_REACHED_DEFAULT_CASE_RET();
                        }
                        u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                        /* add base */
                        switch (bSib & X86_SIB_BASE_MASK)
                        {
                            case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
                            case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
                            case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
                            case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
                            /* ESP base: the second byte of cbImmAndRspOffset holds an extra
                               RSP displacement (for POP [ESP] style accesses). */
                            case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
                            case 5:
                                if ((bRm & X86_MODRM_MOD_MASK) != 0)
                                {
                                    u32EffAddr += pVCpu->cpum.GstCtx.ebp;
                                    SET_SS_DEF();
                                }
                                else
                                {
                                    uint32_t u32Disp;
                                    IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                                    u32EffAddr += u32Disp;
                                }
                                break;
                            case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
                            case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
                            IEM_NOT_REACHED_DEFAULT_CASE_RET();
                        }
                        break;
                    }
                    case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
                    case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                    case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }

                /* Get and add the displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:
                        break;
                    case 1:
                    {
                        int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                        u32EffAddr += i8Disp;
                        break;
                    }
                    case 2:
                    {
                        uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                        u32EffAddr += u32Disp;
                        break;
                    }
                    default:
                        AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
                }

            }
            Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
            *pGCPtrEff = u32EffAddr;
        }
    }
    else
    {
        uint64_t u64EffAddr;

        /* Handle the rip+disp32 form with no registers first.  The low byte
           of cbImmAndRspOffset is the immediate size, needed to get the
           address of the next instruction which RIP-relative is based on. */
        if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
        {
            IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
            u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
        }
        else
        {
            /* Get the register (or SIB) value. */
            switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
            {
                case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
                case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                /* SIB */
                case 4:
                case 12:
                {
                    uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);

                    /* Get the index and scale it. */
                    switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
                    {
                        case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                        case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                        case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                        case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                        case  4: u64EffAddr = 0; /*none */ break;
                        case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                        case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                        case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                        case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                        case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                        case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                        case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                        case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                        case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                        case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                        case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET();
                    }
                    u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                    /* add base */
                    switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
                    {
                        case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                        case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                        case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                        case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                        /* RSP base: second byte of cbImmAndRspOffset is an extra RSP displacement. */
                        case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
                        case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                        case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                        case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
                        case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
                        case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                        case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                        case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                        case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                        case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                        /* complicated encodings: base 5/13 with mod 0 means disp32, no base reg. */
                        case 5:
                        case 13:
                            if ((bRm & X86_MODRM_MOD_MASK) != 0)
                            {
                                if (!pVCpu->iem.s.uRexB)
                                {
                                    u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                                    SET_SS_DEF();
                                }
                                else
                                    u64EffAddr += pVCpu->cpum.GstCtx.r13;
                            }
                            else
                            {
                                uint32_t u32Disp;
                                IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                                u64EffAddr += (int32_t)u32Disp;
                            }
                            break;
                        IEM_NOT_REACHED_DEFAULT_CASE_RET();
                    }
                    break;
                }
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }

            /* Get and add the displacement. */
            switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
            {
                case 0:
                    break;
                case 1:
                {
                    int8_t i8Disp;
                    IEM_OPCODE_GET_NEXT_S8(&i8Disp);
                    u64EffAddr += i8Disp;
                    break;
                }
                case 2:
                {
                    uint32_t u32Disp;
                    IEM_OPCODE_GET_NEXT_U32(&u32Disp);
                    u64EffAddr += (int32_t)u32Disp;
                    break;
                }
                IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
            }

        }

        /* A 32-bit address size prefix in 64-bit mode truncates the result. */
        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
            *pGCPtrEff = u64EffAddr;
        else
        {
            Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
            *pGCPtrEff = u64EffAddr & UINT32_MAX;
        }
    }

    Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
    return VINF_SUCCESS;
}
8208
8209
8210#ifdef IEM_WITH_SETJMP
8211/**
8212 * Calculates the effective address of a ModR/M memory operand.
8213 *
8214 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8215 *
8216 * May longjmp on internal error.
8217 *
8218 * @return The effective address.
8219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8220 * @param bRm The ModRM byte.
8221 * @param cbImmAndRspOffset - First byte: The size of any immediate
8222 * following the effective address opcode bytes
8223 * (only for RIP relative addressing).
8224 * - Second byte: RSP displacement (for POP [ESP]).
8225 */
8226RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8227{
8228 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8229# define SET_SS_DEF() \
8230 do \
8231 { \
8232 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8233 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8234 } while (0)
8235
8236 if (!IEM_IS_64BIT_CODE(pVCpu))
8237 {
8238/** @todo Check the effective address size crap! */
8239 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8240 {
8241 uint16_t u16EffAddr;
8242
8243 /* Handle the disp16 form with no registers first. */
8244 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8245 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8246 else
8247 {
8248 /* Get the displacment. */
8249 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8250 {
8251 case 0: u16EffAddr = 0; break;
8252 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8253 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8254 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8255 }
8256
8257 /* Add the base and index registers to the disp. */
8258 switch (bRm & X86_MODRM_RM_MASK)
8259 {
8260 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8261 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8262 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8263 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8264 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8265 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8266 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8267 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8268 }
8269 }
8270
8271 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8272 return u16EffAddr;
8273 }
8274
8275 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8276 uint32_t u32EffAddr;
8277
8278 /* Handle the disp32 form with no registers first. */
8279 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8280 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8281 else
8282 {
8283 /* Get the register (or SIB) value. */
8284 switch ((bRm & X86_MODRM_RM_MASK))
8285 {
8286 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8287 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8288 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8289 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8290 case 4: /* SIB */
8291 {
8292 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8293
8294 /* Get the index and scale it. */
8295 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8296 {
8297 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8298 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8299 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8300 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8301 case 4: u32EffAddr = 0; /*none */ break;
8302 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8303 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8304 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8305 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8306 }
8307 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8308
8309 /* add base */
8310 switch (bSib & X86_SIB_BASE_MASK)
8311 {
8312 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8313 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8314 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8315 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8316 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8317 case 5:
8318 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8319 {
8320 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8321 SET_SS_DEF();
8322 }
8323 else
8324 {
8325 uint32_t u32Disp;
8326 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8327 u32EffAddr += u32Disp;
8328 }
8329 break;
8330 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8331 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8332 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8333 }
8334 break;
8335 }
8336 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8337 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8338 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8339 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8340 }
8341
8342 /* Get and add the displacement. */
8343 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8344 {
8345 case 0:
8346 break;
8347 case 1:
8348 {
8349 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8350 u32EffAddr += i8Disp;
8351 break;
8352 }
8353 case 2:
8354 {
8355 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8356 u32EffAddr += u32Disp;
8357 break;
8358 }
8359 default:
8360 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8361 }
8362 }
8363
8364 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8365 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8366 return u32EffAddr;
8367 }
8368
8369 uint64_t u64EffAddr;
8370
8371 /* Handle the rip+disp32 form with no registers first. */
8372 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8373 {
8374 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8375 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8376 }
8377 else
8378 {
8379 /* Get the register (or SIB) value. */
8380 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8381 {
8382 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8383 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8384 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8385 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8386 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8387 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8388 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8389 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8390 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8391 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8392 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8393 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8394 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8395 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8396 /* SIB */
8397 case 4:
8398 case 12:
8399 {
8400 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8401
8402 /* Get the index and scale it. */
8403 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8404 {
8405 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8406 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8407 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8408 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8409 case 4: u64EffAddr = 0; /*none */ break;
8410 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8411 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8412 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8413 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8414 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8415 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8416 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8417 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8418 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8419 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8420 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8421 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8422 }
8423 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8424
8425 /* add base */
8426 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8427 {
8428 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8429 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8430 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8431 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8432 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8433 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8434 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8435 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8436 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8437 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8438 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8439 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8440 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8441 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8442 /* complicated encodings */
8443 case 5:
8444 case 13:
8445 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8446 {
8447 if (!pVCpu->iem.s.uRexB)
8448 {
8449 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8450 SET_SS_DEF();
8451 }
8452 else
8453 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8454 }
8455 else
8456 {
8457 uint32_t u32Disp;
8458 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8459 u64EffAddr += (int32_t)u32Disp;
8460 }
8461 break;
8462 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8463 }
8464 break;
8465 }
8466 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8467 }
8468
8469 /* Get and add the displacement. */
8470 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8471 {
8472 case 0:
8473 break;
8474 case 1:
8475 {
8476 int8_t i8Disp;
8477 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8478 u64EffAddr += i8Disp;
8479 break;
8480 }
8481 case 2:
8482 {
8483 uint32_t u32Disp;
8484 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8485 u64EffAddr += (int32_t)u32Disp;
8486 break;
8487 }
8488 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8489 }
8490
8491 }
8492
8493 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8494 {
8495 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8496 return u64EffAddr;
8497 }
8498 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8499 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8500 return u64EffAddr & UINT32_MAX;
8501}
8502#endif /* IEM_WITH_SETJMP */
8503
8504
8505/**
8506 * Calculates the effective address of a ModR/M memory operand, extended version
8507 * for use in the recompilers.
8508 *
8509 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8510 *
8511 * @return Strict VBox status code.
8512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8513 * @param bRm The ModRM byte.
8514 * @param cbImmAndRspOffset - First byte: The size of any immediate
8515 * following the effective address opcode bytes
8516 * (only for RIP relative addressing).
8517 * - Second byte: RSP displacement (for POP [ESP]).
8518 * @param pGCPtrEff Where to return the effective address.
8519 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8520 * SIB byte (bits 39:32).
8521 */
8522VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8523{
8524 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8525# define SET_SS_DEF() \
8526 do \
8527 { \
8528 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8529 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8530 } while (0)
8531
8532 uint64_t uInfo;
8533 if (!IEM_IS_64BIT_CODE(pVCpu))
8534 {
8535/** @todo Check the effective address size crap! */
8536 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8537 {
8538 uint16_t u16EffAddr;
8539
8540 /* Handle the disp16 form with no registers first. */
8541 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8542 {
8543 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8544 uInfo = u16EffAddr;
8545 }
8546 else
8547 {
8548 /* Get the displacment. */
8549 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8550 {
8551 case 0: u16EffAddr = 0; break;
8552 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8553 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8554 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8555 }
8556 uInfo = u16EffAddr;
8557
8558 /* Add the base and index registers to the disp. */
8559 switch (bRm & X86_MODRM_RM_MASK)
8560 {
8561 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8562 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8563 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8564 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8565 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8566 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8567 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8568 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8569 }
8570 }
8571
8572 *pGCPtrEff = u16EffAddr;
8573 }
8574 else
8575 {
8576 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8577 uint32_t u32EffAddr;
8578
8579 /* Handle the disp32 form with no registers first. */
8580 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8581 {
8582 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8583 uInfo = u32EffAddr;
8584 }
8585 else
8586 {
8587 /* Get the register (or SIB) value. */
8588 uInfo = 0;
8589 switch ((bRm & X86_MODRM_RM_MASK))
8590 {
8591 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8592 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8593 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8594 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8595 case 4: /* SIB */
8596 {
8597 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8598 uInfo = (uint64_t)bSib << 32;
8599
8600 /* Get the index and scale it. */
8601 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8602 {
8603 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8604 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8605 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8606 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8607 case 4: u32EffAddr = 0; /*none */ break;
8608 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8609 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8610 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8611 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8612 }
8613 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8614
8615 /* add base */
8616 switch (bSib & X86_SIB_BASE_MASK)
8617 {
8618 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8619 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8620 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8621 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8622 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8623 case 5:
8624 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8625 {
8626 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8627 SET_SS_DEF();
8628 }
8629 else
8630 {
8631 uint32_t u32Disp;
8632 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8633 u32EffAddr += u32Disp;
8634 uInfo |= u32Disp;
8635 }
8636 break;
8637 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8638 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8639 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8640 }
8641 break;
8642 }
8643 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8644 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8645 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8646 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8647 }
8648
8649 /* Get and add the displacement. */
8650 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8651 {
8652 case 0:
8653 break;
8654 case 1:
8655 {
8656 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8657 u32EffAddr += i8Disp;
8658 uInfo |= (uint32_t)(int32_t)i8Disp;
8659 break;
8660 }
8661 case 2:
8662 {
8663 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8664 u32EffAddr += u32Disp;
8665 uInfo |= (uint32_t)u32Disp;
8666 break;
8667 }
8668 default:
8669 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8670 }
8671
8672 }
8673 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8674 *pGCPtrEff = u32EffAddr;
8675 }
8676 }
8677 else
8678 {
8679 uint64_t u64EffAddr;
8680
8681 /* Handle the rip+disp32 form with no registers first. */
8682 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8683 {
8684 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8685 uInfo = (uint32_t)u64EffAddr;
8686 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8687 }
8688 else
8689 {
8690 /* Get the register (or SIB) value. */
8691 uInfo = 0;
8692 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8693 {
8694 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8695 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8696 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8697 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8698 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8699 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8700 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8701 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8702 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8703 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8704 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8705 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8706 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8707 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8708 /* SIB */
8709 case 4:
8710 case 12:
8711 {
8712 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8713 uInfo = (uint64_t)bSib << 32;
8714
8715 /* Get the index and scale it. */
8716 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8717 {
8718 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8719 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8720 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8721 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8722 case 4: u64EffAddr = 0; /*none */ break;
8723 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8724 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8725 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8726 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8727 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8728 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8729 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8730 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8731 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8732 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8733 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8734 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8735 }
8736 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8737
8738 /* add base */
8739 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8740 {
8741 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8742 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8743 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8744 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8745 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8746 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8747 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8748 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8749 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8750 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8751 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8752 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8753 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8754 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8755 /* complicated encodings */
8756 case 5:
8757 case 13:
8758 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8759 {
8760 if (!pVCpu->iem.s.uRexB)
8761 {
8762 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8763 SET_SS_DEF();
8764 }
8765 else
8766 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8767 }
8768 else
8769 {
8770 uint32_t u32Disp;
8771 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8772 u64EffAddr += (int32_t)u32Disp;
8773 uInfo |= u32Disp;
8774 }
8775 break;
8776 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8777 }
8778 break;
8779 }
8780 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8781 }
8782
8783 /* Get and add the displacement. */
8784 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8785 {
8786 case 0:
8787 break;
8788 case 1:
8789 {
8790 int8_t i8Disp;
8791 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8792 u64EffAddr += i8Disp;
8793 uInfo |= (uint32_t)(int32_t)i8Disp;
8794 break;
8795 }
8796 case 2:
8797 {
8798 uint32_t u32Disp;
8799 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8800 u64EffAddr += (int32_t)u32Disp;
8801 uInfo |= u32Disp;
8802 break;
8803 }
8804 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8805 }
8806
8807 }
8808
8809 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8810 *pGCPtrEff = u64EffAddr;
8811 else
8812 {
8813 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8814 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8815 }
8816 }
8817 *puInfo = uInfo;
8818
8819 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
8820 return VINF_SUCCESS;
8821}
8822
8823/** @} */
8824
8825
#ifdef LOG_ENABLED
/**
 * Logs the current instruction.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param fSameCtx Set if we have the same context information as the VMM,
 * clear if we may have already executed an instruction in
 * our debug context. When clear, we assume IEMCPU holds
 * valid CPU mode info.
 *
 * The @a fSameCtx parameter is now misleading and obsolete.
 * @param pszFunction The IEM function doing the execution.
 */
static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
{
# ifdef IN_RING3
    if (LogIs2Enabled())
    {
        char szInstr[256];
        uint32_t cbInstr = 0;
        if (fSameCtx)
            /* Same context as the VMM: disassemble at the current guest CS:RIP
               using the default (current) CPU mode. */
            DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
                               DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                               szInstr, sizeof(szInstr), &cbInstr);
        else
        {
            /* The VMM's view may be stale; derive the disassembly mode from
               IEM's own notion of the CPU mode instead. */
            uint32_t fFlags = 0;
            switch (IEM_GET_CPU_MODE(pVCpu))
            {
                case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
                case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
                case IEMMODE_16BIT:
                    /* 16-bit is further split: real/V86 mode vs 16-bit protected mode. */
                    if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
                        fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
                    else
                        fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
                    break;
            }
            DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
                               szInstr, sizeof(szInstr), &cbInstr);
        }

        /* Dump the interesting guest state (GPRs, segment selectors, x87/SSE
           control words) together with the disassembled instruction. */
        PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
        Log2(("**** %s fExec=%x\n"
              " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
              " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
              " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
              " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
              " %s\n"
              , pszFunction, pVCpu->iem.s.fExec,
              pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
              pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
              pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
              pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
              pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
              szInstr));

        /* This stuff sucks atm. as it fills the log with MSRs. */
        //if (LogIs3Enabled())
        //    DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
    }
    else
# endif
        /* Non-ring-3 (or Log2 disabled): cheap one-line cs:rip/ss:rsp/EFLAGS trace. */
        LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
                 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
    RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
}
#endif /* LOG_ENABLED */
8893
8894
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/**
 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
 *
 * Called at an instruction boundary; the branch ordering below encodes the
 * architectural priority of these pending events, so do not reorder.
 *
 * @returns Modified rcStrict.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param rcStrict The instruction execution status.
 */
static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
{
    Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
    /* Neither APIC-write emulation nor MTF pending: consider the lower-priority exits. */
    if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
    {
        /* VMX preemption timer takes priority over NMI-window exits. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
        {
            rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
            Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
        }
        /*
         * Check remaining intercepts.
         *
         * NMI-window and Interrupt-window VM-exits.
         * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
         * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
         *
         * See Intel spec. 26.7.6 "NMI-Window Exiting".
         * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
         */
        else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
                 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
                 && !TRPMHasTrap(pVCpu))
        {
            Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
            /* NMI-window has priority over interrupt-window. */
            if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
                && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
            {
                rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
                Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
            }
            else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
                     && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
            {
                rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
                Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
            }
        }
    }
    /* TPR-below threshold/APIC write has the highest priority. */
    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    {
        rcStrict = iemVmxApicWriteEmulation(pVCpu);
        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    }
    /* MTF takes priority over VMX-preemption timer. */
    else
    {
        rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    }
    return rcStrict;
}
#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
8961
8962
8963/**
8964 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
8965 * IEMExecOneWithPrefetchedByPC.
8966 *
8967 * Similar code is found in IEMExecLots.
8968 *
8969 * @return Strict VBox status code.
8970 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
8971 * @param fExecuteInhibit If set, execute the instruction following CLI,
8972 * POP SS and MOV SS,GR.
8973 * @param pszFunction The calling function name.
8974 */
8975DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
8976{
8977 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
8978 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
8979 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
8980 RT_NOREF_PV(pszFunction);
8981
8982#ifdef IEM_WITH_SETJMP
8983 VBOXSTRICTRC rcStrict;
8984 IEM_TRY_SETJMP(pVCpu, rcStrict)
8985 {
8986 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
8987 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
8988 }
8989 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
8990 {
8991 pVCpu->iem.s.cLongJumps++;
8992 }
8993 IEM_CATCH_LONGJMP_END(pVCpu);
8994#else
8995 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
8996 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
8997#endif
8998 if (rcStrict == VINF_SUCCESS)
8999 pVCpu->iem.s.cInstructions++;
9000 if (pVCpu->iem.s.cActiveMappings > 0)
9001 {
9002 Assert(rcStrict != VINF_SUCCESS);
9003 iemMemRollback(pVCpu);
9004 }
9005 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9006 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9007 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9008
9009//#ifdef DEBUG
9010// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9011//#endif
9012
9013#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9014 /*
9015 * Perform any VMX nested-guest instruction boundary actions.
9016 *
9017 * If any of these causes a VM-exit, we must skip executing the next
9018 * instruction (would run into stale page tables). A VM-exit makes sure
9019 * there is no interrupt-inhibition, so that should ensure we don't go
9020 * to try execute the next instruction. Clearing fExecuteInhibit is
9021 * problematic because of the setjmp/longjmp clobbering above.
9022 */
9023 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9024 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9025 || rcStrict != VINF_SUCCESS)
9026 { /* likely */ }
9027 else
9028 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9029#endif
9030
9031 /* Execute the next instruction as well if a cli, pop ss or
9032 mov ss, Gr has just completed successfully. */
9033 if ( fExecuteInhibit
9034 && rcStrict == VINF_SUCCESS
9035 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9036 {
9037 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9038 if (rcStrict == VINF_SUCCESS)
9039 {
9040#ifdef LOG_ENABLED
9041 iemLogCurInstr(pVCpu, false, pszFunction);
9042#endif
9043#ifdef IEM_WITH_SETJMP
9044 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9045 {
9046 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9047 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9048 }
9049 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9050 {
9051 pVCpu->iem.s.cLongJumps++;
9052 }
9053 IEM_CATCH_LONGJMP_END(pVCpu);
9054#else
9055 IEM_OPCODE_GET_FIRST_U8(&b);
9056 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9057#endif
9058 if (rcStrict == VINF_SUCCESS)
9059 {
9060 pVCpu->iem.s.cInstructions++;
9061#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9062 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9063 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9064 { /* likely */ }
9065 else
9066 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9067#endif
9068 }
9069 if (pVCpu->iem.s.cActiveMappings > 0)
9070 {
9071 Assert(rcStrict != VINF_SUCCESS);
9072 iemMemRollback(pVCpu);
9073 }
9074 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9075 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9076 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9077 }
9078 else if (pVCpu->iem.s.cActiveMappings > 0)
9079 iemMemRollback(pVCpu);
9080 /** @todo drop this after we bake this change into RIP advancing. */
9081 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9082 }
9083
9084 /*
9085 * Return value fiddling, statistics and sanity assertions.
9086 */
9087 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9088
9089 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9090 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9091 return rcStrict;
9092}
9093
9094
9095/**
9096 * Execute one instruction.
9097 *
9098 * @return Strict VBox status code.
9099 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9100 */
9101VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9102{
9103 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do it's job w/o instruction stats) */
9104#ifdef LOG_ENABLED
9105 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9106#endif
9107
9108 /*
9109 * Do the decoding and emulation.
9110 */
9111 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9112 if (rcStrict == VINF_SUCCESS)
9113 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9114 else if (pVCpu->iem.s.cActiveMappings > 0)
9115 iemMemRollback(pVCpu);
9116
9117 if (rcStrict != VINF_SUCCESS)
9118 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9119 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9120 return rcStrict;
9121}
9122
9123
9124VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9125{
9126 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9127 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9128 if (rcStrict == VINF_SUCCESS)
9129 {
9130 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9131 if (pcbWritten)
9132 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9133 }
9134 else if (pVCpu->iem.s.cActiveMappings > 0)
9135 iemMemRollback(pVCpu);
9136
9137 return rcStrict;
9138}
9139
9140
/**
 * Executes one instruction, seeding the decoder with caller-supplied opcode
 * bytes when they match the current guest RIP; otherwise falls back to a
 * normal opcode prefetch.
 *
 * @return  Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   OpcodeBytesPC   Guest RIP the prefetched bytes were read from.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes.
 */
VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
                                                   const void *pvOpcodeBytes, size_t cbOpcodeBytes)
{
    VBOXSTRICTRC rcStrict;
    if (   cbOpcodeBytes
        && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    {
        /* The supplied bytes match the current RIP: seed the decoder directly
           and skip the guest-memory prefetch. */
        iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
#ifdef IEM_WITH_CODE_TLB
        /* Point the instruction buffer at the caller's bytes, capped to a page.
           GCPhysInstrBuf is invalidated so the TLB won't treat this as guest memory. */
        pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
        pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
        pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.offInstrNextByte = 0;
        pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
#else
        /* No code TLB: copy into the opcode buffer, capped to its size. */
        pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
        memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
#endif
        rcStrict = VINF_SUCCESS;
    }
    else
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}
9171
9172
9173VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9174{
9175 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9176 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9177 if (rcStrict == VINF_SUCCESS)
9178 {
9179 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9180 if (pcbWritten)
9181 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9182 }
9183 else if (pVCpu->iem.s.cActiveMappings > 0)
9184 iemMemRollback(pVCpu);
9185
9186 return rcStrict;
9187}
9188
9189
/**
 * Executes one instruction with access handlers bypassed, using caller
 * supplied pre-fetched opcode bytes when they were fetched at the current
 * guest RIP.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   OpcodeBytesPC   The guest RIP at which @a pvOpcodeBytes were fetched.
 * @param   pvOpcodeBytes   The pre-fetched opcode bytes.
 * @param   cbOpcodeBytes   Number of valid bytes at @a pvOpcodeBytes; zero
 *                          forces a regular prefetch.
 */
VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
                                                         const void *pvOpcodeBytes, size_t cbOpcodeBytes)
{
    VBOXSTRICTRC rcStrict;
    if (   cbOpcodeBytes
        && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    {
        /* Caller's bytes cover the current RIP - skip the prefetch entirely. */
        iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
#ifdef IEM_WITH_CODE_TLB
        /* Point the instruction buffer at the caller's memory, capped at one
           page; GCPhysInstrBuf is invalidated so nothing stale is reused. */
        pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
        pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
        pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.offInstrNextByte = 0;
        pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
#else
        /* Copy into the opcode buffer, truncating to its capacity. */
        pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
        memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
#endif
        rcStrict = VINF_SUCCESS;
    }
    else
        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
    else if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}
9220
9221
9222/**
9223 * For handling split cacheline lock operations when the host has split-lock
9224 * detection enabled.
9225 *
9226 * This will cause the interpreter to disregard the lock prefix and implicit
9227 * locking (xchg).
9228 *
9229 * @returns Strict VBox status code.
9230 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9231 */
9232VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9233{
9234 /*
9235 * Do the decoding and emulation.
9236 */
9237 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9238 if (rcStrict == VINF_SUCCESS)
9239 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9240 else if (pVCpu->iem.s.cActiveMappings > 0)
9241 iemMemRollback(pVCpu);
9242
9243 if (rcStrict != VINF_SUCCESS)
9244 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9245 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9246 return rcStrict;
9247}
9248
9249
/**
 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
 * inject a pending TRPM trap.
 *
 * @returns Strict VBox status code.  VINF_SUCCESS is also returned when the
 *          injection was skipped because delivery is currently inhibited
 *          (interrupt/NMI shadow) or interrupts are disabled.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
{
    Assert(TRPMHasTrap(pVCpu));

    /* Only deliver when neither an interrupt shadow nor NMI inhibition blocks it. */
    if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
        && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    {
        /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        /* With nested hw-virt, interruptibility depends on GIF first, then on
           the guest mode (plain IF, VMX non-root, or SVM nested). */
        bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
        if (fIntrEnabled)
        {
            if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
                fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
            else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
                fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
            else
            {
                Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
                fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
            }
        }
#else
        bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
#endif
        if (fIntrEnabled)
        {
            /* Pull the event details out of TRPM and let IEM deliver it. */
            uint8_t u8TrapNo;
            TRPMEVENT enmType;
            uint32_t uErrCode;
            RTGCPTR uCr2;
            int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
            AssertRC(rc2);
            Assert(enmType == TRPM_HARDWARE_INT);
            VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);

            TRPMResetTrap(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
            /* Injecting an event may cause a VM-exit. */
            if (   rcStrict != VINF_SUCCESS
                && rcStrict != VINF_IEM_RAISED_XCPT)
                return iemExecStatusCodeFiddling(pVCpu, rcStrict);
#else
            NOREF(rcStrict);
#endif
        }
    }

    return VINF_SUCCESS;
}
9305
9306
/**
 * Executes up to @a cMaxInstructions instructions in a tight interpreter loop.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling EMT.
 * @param   cMaxInstructions    Maximum number of instructions to execute;
 *                              must be greater than zero.
 * @param   cPollRate           Timer poll interval mask; cPollRate + 1 must be
 *                              a power of two (asserted below).
 * @param   pcInstructions      Where to return the number of instructions
 *                              actually executed, optional (may be NULL).
 */
VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
{
    uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
    AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
    Assert(cMaxInstructions > 0);

    /*
     * See if there is an interrupt pending in TRPM, inject it if we can.
     */
    /** @todo What if we are injecting an exception and not an interrupt? Is that
     *        possible here? For now we assert it is indeed only an interrupt. */
    if (!TRPMHasTrap(pVCpu))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /*likely */ }
        else
            return rcStrict;
    }

    /*
     * Initial decoder init w/ prefetch, then setup setjmp.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
    {
#ifdef IEM_WITH_SETJMP
        pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
        IEM_TRY_SETJMP(pVCpu, rcStrict)
#endif
        {
            /*
             * The run loop.  We limit ourselves to 4096 instructions right now.
             */
            uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
            PVMCC pVM = pVCpu->CTX_SUFF(pVM);
            for (;;)
            {
                /*
                 * Log the state.
                 */
#ifdef LOG_ENABLED
                iemLogCurInstr(pVCpu, true, "IEMExecLots");
#endif

                /*
                 * Do the decoding and emulation.
                 */
                uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
                rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
#ifdef VBOX_STRICT
                CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
#endif
                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                {
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                    pVCpu->iem.s.cInstructions++;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                    /* Perform any VMX nested-guest instruction boundary actions. */
                    uint64_t fCpu = pVCpu->fLocalForcedActions;
                    if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                  | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
                    { /* likely */ }
                    else
                    {
                        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
                        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                            fCpu = pVCpu->fLocalForcedActions;
                        else
                        {
                            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                            break;
                        }
                    }
#endif
                    if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
                    {
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
                        uint64_t fCpu = pVCpu->fLocalForcedActions;
#endif
                        /* Mask out the forced actions this loop doesn't need to
                           leave for (CR3 sync, TLB flush and unhalt are handled
                           elsewhere / don't require exiting the loop). */
                        fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                      | VMCPU_FF_TLB_FLUSH
                                                      | VMCPU_FF_UNHALT );

                        /* Keep looping while nothing demands attention; pending
                           APIC/PIC interrupts are ignorable only with IF clear. */
                        if (RT_LIKELY(   (   !fCpu
                                          || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                                              && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
                                      && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
                        {
                            if (--cMaxInstructionsGccStupidity > 0)
                            {
                                /* Poll timers every now an then according to the caller's specs. */
                                if (   (cMaxInstructionsGccStupidity & cPollRate) != 0
                                    || !TMTimerPollBool(pVM, pVCpu))
                                {
                                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                                    iemReInitDecoder(pVCpu);
                                    continue;
                                }
                            }
                        }
                    }
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                }
                else if (pVCpu->iem.s.cActiveMappings > 0)
                    iemMemRollback(pVCpu);
                rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                break;
            }
        }
#ifdef IEM_WITH_SETJMP
        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
        {
            /* Longjmp path: roll back any mappings left by the aborted instruction. */
            if (pVCpu->iem.s.cActiveMappings > 0)
                iemMemRollback(pVCpu);
# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
# endif
            pVCpu->iem.s.cLongJumps++;
        }
        IEM_CATCH_LONGJMP_END(pVCpu);
#endif

        /*
         * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
         */
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    }
    else
    {
        if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        /*
         * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
         * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
         */
        rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
#endif
    }

    /*
     * Maybe re-enter raw-mode and log.
     */
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    if (pcInstructions)
        *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
    return rcStrict;
}
9464
9465
/**
 * Interface used by EMExecuteExec, does exit statistics and limits.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   fWillExit           To be defined.
 * @param   cMinInstructions    Minimum number of instructions to execute before checking for FFs.
 * @param   cMaxInstructions    Maximum number of instructions to execute.
 * @param   cMaxInstructionsWithoutExits
 *                              The max number of instructions without exits.
 * @param   pStats              Where to return statistics.
 */
VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
                                      uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
{
    NOREF(fWillExit); /** @todo define flexible exit crits */

    /*
     * Initialize return stats.
     */
    pStats->cInstructions = 0;
    pStats->cExits = 0;
    pStats->cMaxExitDistance = 0;
    pStats->cReserved = 0;

    /*
     * Initial decoder init w/ prefetch, then setup setjmp.
     */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    if (rcStrict == VINF_SUCCESS)
    {
#ifdef IEM_WITH_SETJMP
        pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
        IEM_TRY_SETJMP(pVCpu, rcStrict)
#endif
        {
#ifdef IN_RING0
            bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
#endif
            uint32_t cInstructionSinceLastExit = 0;

            /*
             * The run loop.  We limit ourselves to 4096 instructions right now.
             */
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            for (;;)
            {
                /*
                 * Log the state.
                 */
#ifdef LOG_ENABLED
                iemLogCurInstr(pVCpu, true, "IEMExecForExits");
#endif

                /*
                 * Do the decoding and emulation.
                 */
                uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;

                uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
                rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);

                /* Detect an exit by watching the potential-exit counter change
                   across the instruction; track the longest exit-free stretch. */
                if (   cPotentialExits != pVCpu->iem.s.cPotentialExits
                    && cInstructionSinceLastExit > 0 /* don't count the first */ )
                {
                    pStats->cExits += 1;
                    if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
                        pStats->cMaxExitDistance = cInstructionSinceLastExit;
                    cInstructionSinceLastExit = 0;
                }

                if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                {
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                    pVCpu->iem.s.cInstructions++;
                    pStats->cInstructions++;
                    cInstructionSinceLastExit++;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
                    /* Perform any VMX nested-guest instruction boundary actions. */
                    uint64_t fCpu = pVCpu->fLocalForcedActions;
                    if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
                                  | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
                    { /* likely */ }
                    else
                    {
                        rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
                        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
                            fCpu = pVCpu->fLocalForcedActions;
                        else
                        {
                            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                            break;
                        }
                    }
#endif
                    if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
                    {
#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
                        uint64_t fCpu = pVCpu->fLocalForcedActions;
#endif
                        /* Mask out forced actions that don't require leaving the loop. */
                        fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                                      | VMCPU_FF_TLB_FLUSH
                                                      | VMCPU_FF_UNHALT );
                        /* Continue while nothing demands attention, or while the
                           caller-specified minimum hasn't been reached yet. */
                        if (RT_LIKELY(   (   (   !fCpu
                                              || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                                                  && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
                                          && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
                                      || pStats->cInstructions < cMinInstructions))
                        {
                            if (pStats->cInstructions < cMaxInstructions)
                            {
                                if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
                                {
#ifdef IN_RING0
                                    if (   !fCheckPreemptionPending
                                        || !RTThreadPreemptIsPending(NIL_RTTHREAD))
#endif
                                    {
                                        Assert(pVCpu->iem.s.cActiveMappings == 0);
                                        iemReInitDecoder(pVCpu);
                                        continue;
                                    }
#ifdef IN_RING0
                                    rcStrict = VINF_EM_RAW_INTERRUPT;
                                    break;
#endif
                                }
                            }
                        }
                        Assert(!(fCpu & VMCPU_FF_IEM));
                    }
                    Assert(pVCpu->iem.s.cActiveMappings == 0);
                }
                else if (pVCpu->iem.s.cActiveMappings > 0)
                    iemMemRollback(pVCpu);
                rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
                break;
            }
        }
#ifdef IEM_WITH_SETJMP
        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
        {
            /* Longjmp path: roll back any mappings left by the aborted instruction. */
            if (pVCpu->iem.s.cActiveMappings > 0)
                iemMemRollback(pVCpu);
            pVCpu->iem.s.cLongJumps++;
        }
        IEM_CATCH_LONGJMP_END(pVCpu);
#endif

        /*
         * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
         */
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
        Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    }
    else
    {
        if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
        /*
         * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
         * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
         */
        rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
#endif
    }

    /*
     * Maybe re-enter raw-mode and log.
     */
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
                 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
    return rcStrict;
}
9646
9647
/**
 * Injects a trap, fault, abort, software interrupt or external interrupt.
 *
 * The parameter list matches TRPMQueryTrapAll pretty closely.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   u8TrapNo    The trap number.
 * @param   enmType     What type is it (trap/fault/abort), software
 *                      interrupt or hardware interrupt.
 * @param   uErrCode    The error code if applicable.
 * @param   uCr2        The CR2 value if applicable.
 * @param   cbInstr     The instruction length (only relevant for
 *                      software interrupts).
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
                                         uint8_t cbInstr)
{
    iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
#ifdef DBGFTRACE_ENABLED
    RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
                      u8TrapNo, enmType, uErrCode, uCr2);
#endif

    /*
     * Translate the TRPM event type into IEM exception flags; error code and
     * CR2 only apply to CPU exceptions (and CR2 only to #PF).
     */
    uint32_t fFlags;
    switch (enmType)
    {
        case TRPM_HARDWARE_INT:
            Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
            fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
            uErrCode = uCr2 = 0;
            break;

        case TRPM_SOFTWARE_INT:
            Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
            fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
            uErrCode = uCr2 = 0;
            break;

        case TRPM_TRAP:
        case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
            Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
            fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
            if (u8TrapNo == X86_XCPT_PF)
                fFlags |= IEM_XCPT_FLAGS_CR2;
            /* These exception vectors push an error code on the stack. */
            switch (u8TrapNo)
            {
                case X86_XCPT_DF:
                case X86_XCPT_TS:
                case X86_XCPT_NP:
                case X86_XCPT_SS:
                case X86_XCPT_PF:
                case X86_XCPT_AC:
                case X86_XCPT_GP:
                    fFlags |= IEM_XCPT_FLAGS_ERR;
                    break;
            }
            break;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

    VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);

    if (pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);

    return rcStrict;
}
9717
9718
/**
 * Injects the active TRPM event.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
{
#ifndef IEM_IMPLEMENTS_TASKSWITCH
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
#else
    /* Fetch the pending event from TRPM and hand it to IEMInjectTrap. */
    uint8_t u8TrapNo;
    TRPMEVENT enmType;
    uint32_t uErrCode;
    RTGCUINTPTR uCr2;
    uint8_t cbInstr;
    int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
    if (RT_FAILURE(rc))
        return rc;

    /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
     *        ICEBP \#DB injection as a special case. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    /* A nested VM-exit during delivery still counts as successful injection. */
    if (rcStrict == VINF_SVM_VMEXIT)
        rcStrict = VINF_SUCCESS;
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    if (rcStrict == VINF_VMX_VMEXIT)
        rcStrict = VINF_SUCCESS;
#endif
    /** @todo Are there any other codes that imply the event was successfully
     *        delivered to the guest? See @bugref{6607}. */
    if (   rcStrict == VINF_SUCCESS
        || rcStrict == VINF_IEM_RAISED_XCPT)
        TRPMResetTrap(pVCpu);

    return rcStrict;
#endif
}
9759
9760
9761VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9762{
9763 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9764 return VERR_NOT_IMPLEMENTED;
9765}
9766
9767
9768VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9769{
9770 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9771 return VERR_NOT_IMPLEMENTED;
9772}
9773
9774
/**
 * Interface for HM and EM for executing string I/O OUT (write) instructions.
 *
 * This API ASSUMES that the caller has already verified that the guest code is
 * allowed to access the I/O port.  (The I/O port is in the DX register in the
 * guest state.)
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   cbValue         The size of the I/O port access (1, 2, or 4).
 * @param   enmAddrMode     The addressing mode.
 * @param   fRepPrefix      Indicates whether a repeat prefix is used
 *                          (doesn't matter which for this instruction).
 * @param   cbInstr         The instruction length in bytes.
 * @param   iEffSeg         The effective segment address.
 * @param   fIoChecked      Whether the access to the I/O port has been
 *                          checked or not.  It's typically checked in the
 *                          HM scenario.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                                bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
{
    AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);

    /*
     * State init.
     */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);

    /*
     * Switch orgy for getting to the right handler.
     */
    /* Dispatch on (rep prefix, address mode, operand size) to the matching
       iemCImpl_[rep_]outs_opXX_addrYY worker. */
    VBOXSTRICTRC rcStrict;
    if (fRepPrefix)
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }
    else
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }

    if (pVCpu->iem.s.cActiveMappings)
        iemMemRollback(pVCpu);

    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
9897
9898
/**
 * Interface for HM and EM for executing string I/O IN (read) instructions.
 *
 * This API ASSUMES that the caller has already verified that the guest code is
 * allowed to access the I/O port.  (The I/O port is in the DX register in the
 * guest state.)
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   cbValue         The size of the I/O port access (1, 2, or 4).
 * @param   enmAddrMode     The addressing mode.
 * @param   fRepPrefix      Indicates whether a repeat prefix is used
 *                          (doesn't matter which for this instruction).
 * @param   cbInstr         The instruction length in bytes.
 * @param   fIoChecked      Whether the access to the I/O port has been
 *                          checked or not.  It's typically checked in the
 *                          HM scenario.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
                                               bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);

    /*
     * State init.
     */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);

    /*
     * Switch orgy for getting to the right handler.
     */
    /* Dispatch on (rep prefix, address mode, operand size) to the matching
       iemCImpl_[rep_]ins_opXX_addrYY worker. */
    VBOXSTRICTRC rcStrict;
    if (fRepPrefix)
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }
    else
    {
        switch (enmAddrMode)
        {
            case IEMMODE_16BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_32BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            case IEMMODE_64BIT:
                switch (cbValue)
                {
                    case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
                    case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
                    case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
                    default:
                        AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
                }
                break;

            default:
                AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
        }
    }

    /* Unlike the OUT variant, mappings may legitimately be left active here
       when VMCPU_FF_IEM is pending; only roll back otherwise.
       NOTE(review): presumably the pending-FF case is completed later - confirm. */
    if (   pVCpu->iem.s.cActiveMappings == 0
        || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
    { /* likely  */ }
    else
    {
        AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
        iemMemRollback(pVCpu);
    }
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10024
10025
/**
 * Interface for rawmode to execute an OUT instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   u16Port     The port to write to.
 * @param   fImm        Whether the port is specified using an immediate operand or
 *                      using the implicit DX register.
 * @param   cbReg       The register size (1, 2, or 4; 3 is invalid).
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
    Assert(cbReg <= 4 && cbReg != 3);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    /* Third argument packs the immediate-operand flag (bit 7) with the effective
       address info nibble. */
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
                                             ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10050
10051
/**
 * Interface for rawmode to execute an IN instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   u16Port     The port to read.
 * @param   fImm        Whether the port is specified using an immediate operand or
 *                      using the implicit DX.
 * @param   cbReg       The register size (1, 2, or 4; 3 is invalid).
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
    Assert(cbReg <= 4 && cbReg != 3);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    /* Third argument packs the immediate-operand flag (bit 7) with the effective
       address info nibble. */
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
                                             ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10074
10075
/**
 * Interface for HM and EM to write to a CRx register (MOV CRx,GReg).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iCrReg      The control register number (destination).
 * @param   iGReg       The general purpose register number (source).
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */
    Assert(iCrReg < 16);                            /* CR8 is the max architectural CR, but allow the full encoding range. */
    Assert(iGReg < 16);
    /* NOTE(review): unlike IEMExecDecodedMovCRxRead there is no IEM_CTX_ASSERT
       here - confirm whether that is intentional. */

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10098
10099
/**
 * Interface for HM and EM to read from a CRx register (MOV GReg,CRx).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iGReg       The general purpose register number (destination).
 * @param   iCrReg      The control register number (source).
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */
    /* CR3/CR4 and the APIC TPR (CR8) must be synced in before reading. */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
                        | CPUMCTX_EXTRN_APIC_TPR);
    Assert(iCrReg < 16);
    Assert(iGReg < 16);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10124
10125
/**
 * Interface for HM and EM to write to a DRx register (MOV DRx,GReg).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iDrReg      The debug register number (destination).
 * @param   iGReg       The general purpose register number (source).
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);  /* DR7 gates debug reg access. */
    Assert(iDrReg < 8);                             /* DR0..DR7 only. */
    Assert(iGReg < 16);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10149
10150
/**
 * Interface for HM and EM to read from a DRx register (MOV GReg,DRx).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iGReg       The general purpose register number (destination).
 * @param   iDrReg      The debug register number (source).
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);  /* DR7 gates debug reg access. */
    Assert(iDrReg < 8);                             /* DR0..DR7 only. */
    Assert(iGReg < 16);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10174
10175
/**
 * Interface for HM and EM to clear the CR0[TS] bit (CLTS).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */

    /* Init, run the CLTS worker, then tear down and massage the status code. */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10194
10195
/**
 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   uValue      The value to load into CR0.
 * @param   GCPtrEffDst The guest-linear address if the LMSW instruction has a
 *                      memory operand.  Otherwise pass NIL_RTGCPTR.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);    /* Validate the instruction length (min 3 bytes). */

    /* Init, run the LMSW worker, then tear down and massage the status code. */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10217
10218
/**
 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
 *
 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @remarks In ring-0 not all of the state needs to be synced in.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);    /* Validate the instruction length (min 3 bytes). */

    /* Init, run the XSETBV worker (reads ECX/EDX:EAX from the context), then
       tear down and massage the status code. */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10239
10240
/**
 * Interface for HM and EM to emulate the WBINVD instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */

    /* Init, run the WBINVD worker, then tear down and massage the status code. */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10259
10260
/**
 * Interface for HM and EM to emulate the INVD instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */

    /* Init, run the INVD worker, then tear down and massage the status code. */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10279
10280
/**
 * Interface for HM and EM to emulate the INVLPG instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_PGM_SYNC_CR3
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   GCPtrPage   The effective address of the page to invalidate.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);    /* Validate the instruction length (min 3 bytes). */

    /* Init, run the INVLPG worker, then tear down and massage the status code. */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10302
10303
/**
 * Interface for HM and EM to emulate the INVPCID instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_PGM_SYNC_CR3
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iEffSeg     The effective segment register.
 * @param   GCPtrDesc   The effective address of the INVPCID descriptor.
 * @param   uType       The invalidation type.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
                                                 uint64_t uType)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);    /* Validate the instruction length (min 4 bytes). */

    /* Init, run the INVPCID worker, then tear down and massage the status code. */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10328
10329
/**
 * Interface for HM and EM to emulate the CPUID instruction.
 *
 * @returns Strict VBox status code.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */
    /* CPUID takes its leaf/sub-leaf inputs in RAX and RCX. */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10350
10351
/**
 * Interface for HM and EM to emulate the RDPMC instruction.
 *
 * @returns Strict VBox status code.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);  /* CR4 gates RDPMC (CR4.PCE). */

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10372
10373
/**
 * Interface for HM and EM to emulate the RDTSC instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);  /* CR4 gates RDTSC (CR4.TSD). */

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10395
10396
/**
 * Interface for HM and EM to emulate the RDTSCP instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.  Recommended
 *          to include CPUMCTX_EXTRN_TSC_AUX, to avoid extra fetch call.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);    /* Validate the instruction length (min 3 bytes). */
    /* RDTSCP additionally returns IA32_TSC_AUX in ECX, hence TSC_AUX here. */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10419
10420
/**
 * Interface for HM and EM to emulate the RDMSR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.  Requires RCX and
 *          (currently) all MSRs.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */
    /* RDMSR takes the MSR index in RCX; all MSR state must be available. */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10443
10444
/**
 * Interface for HM and EM to emulate the WRMSR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.  Requires RCX, RAX, RDX,
 *          and (currently) all MSRs.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);    /* Validate the instruction length (min 2 bytes). */
    /* WRMSR takes the MSR index in RCX and the value in EDX:EAX. */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
                        | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10468
10469
/**
 * Interface for HM and EM to emulate the MONITOR instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 * @remarks ASSUMES the default segment of DS and no segment override prefixes
 *          are used.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);    /* Validate the instruction length (min 3 bytes). */
    /* MONITOR references memory (address in RAX), thus the MEM mask and DS. */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    /* Hardcoded DS segment, see the @remarks above. */
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10493
10494
/**
 * Interface for HM and EM to emulate the MWAIT instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);    /* Validate the instruction length (min 3 bytes). */
    /* MWAIT takes its hints/extensions in ECX and EAX. */
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);

    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10516
10517
/**
 * Interface for HM and EM to emulate the HLT instruction.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 *
 * @remarks Not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);    /* HLT is a single byte instruction. */

    /* Init, run the HLT worker, then tear down and massage the status code. */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
    Assert(!pVCpu->iem.s.cActiveMappings);          /* The worker must have committed or rolled back all mappings. */
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
10538
10539
10540/**
10541 * Checks if IEM is in the process of delivering an event (interrupt or
10542 * exception).
10543 *
10544 * @returns true if we're in the process of raising an interrupt or exception,
10545 * false otherwise.
10546 * @param pVCpu The cross context virtual CPU structure.
10547 * @param puVector Where to store the vector associated with the
10548 * currently delivered event, optional.
10549 * @param pfFlags Where to store th event delivery flags (see
10550 * IEM_XCPT_FLAGS_XXX), optional.
10551 * @param puErr Where to store the error code associated with the
10552 * event, optional.
10553 * @param puCr2 Where to store the CR2 associated with the event,
10554 * optional.
10555 * @remarks The caller should check the flags to determine if the error code and
10556 * CR2 are valid for the event.
10557 */
10558VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10559{
10560 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10561 if (fRaisingXcpt)
10562 {
10563 if (puVector)
10564 *puVector = pVCpu->iem.s.uCurXcpt;
10565 if (pfFlags)
10566 *pfFlags = pVCpu->iem.s.fCurXcpt;
10567 if (puErr)
10568 *puErr = pVCpu->iem.s.uCurXcptErr;
10569 if (puCr2)
10570 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10571 }
10572 return fRaisingXcpt;
10573}
10574
10575#ifdef IN_RING3
10576
10577/**
10578 * Handles the unlikely and probably fatal merge cases.
10579 *
10580 * @returns Merged status code.
10581 * @param rcStrict Current EM status code.
10582 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10583 * with @a rcStrict.
10584 * @param iMemMap The memory mapping index. For error reporting only.
10585 * @param pVCpu The cross context virtual CPU structure of the calling
10586 * thread, for error reporting only.
10587 */
10588DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10589 unsigned iMemMap, PVMCPUCC pVCpu)
10590{
10591 if (RT_FAILURE_NP(rcStrict))
10592 return rcStrict;
10593
10594 if (RT_FAILURE_NP(rcStrictCommit))
10595 return rcStrictCommit;
10596
10597 if (rcStrict == rcStrictCommit)
10598 return rcStrictCommit;
10599
10600 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10601 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10602 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10603 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10604 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10605 return VERR_IOM_FF_STATUS_IPE;
10606}
10607
10608
10609/**
10610 * Helper for IOMR3ProcessForceFlag.
10611 *
10612 * @returns Merged status code.
10613 * @param rcStrict Current EM status code.
10614 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10615 * with @a rcStrict.
10616 * @param iMemMap The memory mapping index. For error reporting only.
10617 * @param pVCpu The cross context virtual CPU structure of the calling
10618 * thread, for error reporting only.
10619 */
10620DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10621{
10622 /* Simple. */
10623 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10624 return rcStrictCommit;
10625
10626 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10627 return rcStrict;
10628
10629 /* EM scheduling status codes. */
10630 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10631 && rcStrict <= VINF_EM_LAST))
10632 {
10633 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10634 && rcStrictCommit <= VINF_EM_LAST))
10635 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10636 }
10637
10638 /* Unlikely */
10639 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10640}
10641
10642
/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * Commits any bounce-buffered writes that were deferred to ring-3 and merges
 * their statuses into @a rcStrict.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    /* At least one mapping must actually have a pending ring-3 write, or the
       force flag should not have been set. */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            /* A pending mapping must be a write, bounce buffered and assigned. */
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            /* A bounce buffer covers up to two physical ranges (page crossing):
               the first cbFirst bytes go to GCPhysFirst, the next cbSecond bytes
               to GCPhysSecond. */
            uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            /* Commit the first range, if pending, merging the write status. */
            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            /* Commit the second range, if pending, likewise merging status. */
            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            /* Free the mapping slot now that it has been committed. */
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    /* Every active mapping should have been a pending commit; reset the count. */
    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
10713
10714#endif /* IN_RING3 */
10715
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
ContactPrivacy policyTerms of Use