VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@100931

Last change on this file since 100931 was 100868, checked in by vboxsync, 21 months ago

VBox/log.h,VMM/IEM: Added a dedicated logging group for IEM memory accesses: IEM_MEM bugref:10369

1/* $Id: IEMAll.cpp 100868 2023-08-14 00:49:27Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/** @page pg_iem IEM - Interpreted Execution Manager
30 *
31 * The interpreted execution manager (IEM) is for executing short guest code
32 * sequences that are causing too many exits / virtualization traps. It will
33 * also be used to interpret single instructions, thus replacing the selective
34 * interpreters in EM and IOM.
35 *
36 * Design goals:
37 * - Relatively small footprint, although we favour speed and correctness
38 * over size.
39 * - Reasonably fast.
40 * - Correctly handle lock prefixed instructions.
41 * - Complete instruction set - eventually.
42 * - Refactorable into a recompiler, maybe.
43 * - Replace EMInterpret*.
44 *
45 * Using the existing disassembler has been considered, however this is thought
46 * to conflict with speed as the disassembler chews things a bit too much while
47 * leaving us with a somewhat complicated state to interpret afterwards.
48 *
49 *
50 * The current code is very much work in progress. You've been warned!
51 *
52 *
53 * @section sec_iem_fpu_instr FPU Instructions
54 *
55 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
56 * same or equivalent instructions on the host FPU. To make life easy, we also
57 * let the FPU prioritize the unmasked exceptions for us. This, however, only
58 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
59 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
60 * can trigger spurious FPU exceptions.
61 *
62 * The guest FPU state is not loaded into the host CPU and kept there till we
63 * leave IEM because the calling conventions have declared an all year open
64 * season on much of the FPU state. For instance an innocent looking call to
65 * memcpy might end up using a whole bunch of XMM or MM registers if the
66 * particular implementation finds it worthwhile.
67 *
68 *
69 * @section sec_iem_logging Logging
70 *
71 * The IEM code uses the \"IEM\" log group for the main logging. The different
72 * logging levels/flags are generally used for the following purposes:
73 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
74 * - Flow (LogFlow) : Basic enter/exit IEM state info.
75 * - Level 2 (Log2) : ?
76 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
77 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
78 * - Level 5 (Log5) : Decoding details.
79 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
80 * - Level 7 (Log7) : iret++ execution logging.
81 * - Level 8 (Log8) :
82 * - Level 9 (Log9) :
83 * - Level 10 (Log10): TLBs.
84 * - Level 11 (Log11): Unmasked FPU exceptions.
85 *
86 * The \"IEM_MEM\" log group covers most of memory related details logging,
87 * except for errors and exceptions:
88 * - Level 1 (Log) : Reads.
89 * - Level 2 (Log2) : Read fallbacks.
90 * - Level 3 (Log3) : MemMap read.
91 * - Level 4 (Log4) : MemMap read fallbacks.
92 * - Level 5 (Log5) : Writes.
93 * - Level 6 (Log6) : Write fallbacks.
94 * - Level 7 (Log7) : MemMap writes and read-writes.
95 * - Level 8 (Log8) : MemMap write and read-write fallbacks.
96 * - Level 9 (Log9) : Stack reads.
97 * - Level 10 (Log10): Stack read fallbacks.
98 * - Level 11 (Log11): Stack writes.
99 * - Level 12 (Log12): Stack write fallbacks.
100 * - Flow (LogFlow) :
101 *
102 * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
103 * - Level 1 (Log) : Errors and other major events.
104 * - Flow (LogFlow) : Misc flow stuff (cleanup?)
105 * - Level 2 (Log2) : VM exits.
106 *
107 * The syscall logging level assignments:
108 * - Level 1: DOS and BIOS.
109 * - Level 2: Windows 3.x
110 * - Level 3: Linux.
111 */
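/* Illustrative sketch (not part of the original IEM code) of how the level
 * assignments above are typically exercised with the Log* macros from VBox/log.h;
 * the format strings and variable names below are made up for illustration only:
 *
 *      Log(("iemRaiseXcptOrInt: vector=%#x\n", u8Vector));        // level 1: exceptions
 *      Log4(("decode %04x:%08RX64: mov eax, ebx\n", uCs, uRip));  // level 4: mnemonics w/ EIP
 *      Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));      // level 10: TLBs
 *
 * The IEM_MEM lines are produced the same way in code where LOG_GROUP has been
 * redefined to LOG_GROUP_IEM_MEM.
 */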
112
113/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
114#ifdef _MSC_VER
115# pragma warning(disable:4505)
116#endif
117
118
119/*********************************************************************************************************************************
120* Header Files *
121*********************************************************************************************************************************/
122#define LOG_GROUP LOG_GROUP_IEM
123#define VMCPU_INCL_CPUM_GST_CTX
124#include <VBox/vmm/iem.h>
125#include <VBox/vmm/cpum.h>
126#include <VBox/vmm/apic.h>
127#include <VBox/vmm/pdm.h>
128#include <VBox/vmm/pgm.h>
129#include <VBox/vmm/iom.h>
130#include <VBox/vmm/em.h>
131#include <VBox/vmm/hm.h>
132#include <VBox/vmm/nem.h>
133#include <VBox/vmm/gim.h>
134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
135# include <VBox/vmm/em.h>
136# include <VBox/vmm/hm_svm.h>
137#endif
138#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
139# include <VBox/vmm/hmvmxinline.h>
140#endif
141#include <VBox/vmm/tm.h>
142#include <VBox/vmm/dbgf.h>
143#include <VBox/vmm/dbgftrace.h>
144#include "IEMInternal.h"
145#include <VBox/vmm/vmcc.h>
146#include <VBox/log.h>
147#include <VBox/err.h>
148#include <VBox/param.h>
149#include <VBox/dis.h>
150#include <iprt/asm-math.h>
151#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
152# include <iprt/asm-amd64-x86.h>
153#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
154# include <iprt/asm-arm.h>
155#endif
156#include <iprt/assert.h>
157#include <iprt/string.h>
158#include <iprt/x86.h>
159
160#include "IEMInline.h"
161
162
163/*********************************************************************************************************************************
164* Structures and Typedefs *
165*********************************************************************************************************************************/
166/**
167 * CPU exception classes.
168 */
169typedef enum IEMXCPTCLASS
170{
171 IEMXCPTCLASS_BENIGN,
172 IEMXCPTCLASS_CONTRIBUTORY,
173 IEMXCPTCLASS_PAGE_FAULT,
174 IEMXCPTCLASS_DOUBLE_FAULT
175} IEMXCPTCLASS;
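/* Background sketch (illustrative, not part of the original file): roughly how the
   classes above combine into a double fault per the SDM's exception classification
   table, simplified. A \#DF raised while delivering a \#DF leads to triple fault /
   shutdown, which is why IEMXCPTCLASS_DOUBLE_FAULT is a class of its own. */
#if 0
static bool iemXcptIsDoubleFaultComboIllustration(IEMXCPTCLASS enmFirst, IEMXCPTCLASS enmSecond)
{
    if (enmFirst == IEMXCPTCLASS_CONTRIBUTORY)
        return enmSecond == IEMXCPTCLASS_CONTRIBUTORY;
    if (enmFirst == IEMXCPTCLASS_PAGE_FAULT)
        return enmSecond == IEMXCPTCLASS_CONTRIBUTORY || enmSecond == IEMXCPTCLASS_PAGE_FAULT;
    return false; /* A benign first exception is always handled serially. */
}
#endif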
176
177
178/*********************************************************************************************************************************
179* Global Variables *
180*********************************************************************************************************************************/
181#if defined(IEM_LOG_MEMORY_WRITES)
182/** What IEM just wrote. */
183uint8_t g_abIemWrote[256];
184/** How much IEM just wrote. */
185size_t g_cbIemWrote;
186#endif
187
188
189/*********************************************************************************************************************************
190* Internal Functions *
191*********************************************************************************************************************************/
192static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
193 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
194
195
196/**
197 * Calculates IEM_F_PENDING_BRK_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
198 * path.
199 *
200 * @returns IEM_F_PENDING_BRK_XXX or zero.
201 * @param pVCpu The cross context virtual CPU structure of the
202 * calling thread.
203 *
204 * @note Don't call directly, use iemCalcExecDbgFlags instead.
205 */
206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
207{
208 uint32_t fExec = 0;
209
210 /*
211 * Process guest breakpoints.
212 */
213#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
214 if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
215 { \
216 switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
217 { \
218 case X86_DR7_RW_EO: \
219 fExec |= IEM_F_PENDING_BRK_INSTR; \
220 break; \
221 case X86_DR7_RW_WO: \
222 case X86_DR7_RW_RW: \
223 fExec |= IEM_F_PENDING_BRK_DATA; \
224 break; \
225 case X86_DR7_RW_IO: \
226 fExec |= IEM_F_PENDING_BRK_X86_IO; \
227 break; \
228 } \
229 } \
230 } while (0)
231
232 uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
233 if (fGstDr7 & X86_DR7_ENABLED_MASK)
234 {
235 PROCESS_ONE_BP(fGstDr7, 0);
236 PROCESS_ONE_BP(fGstDr7, 1);
237 PROCESS_ONE_BP(fGstDr7, 2);
238 PROCESS_ONE_BP(fGstDr7, 3);
239 }
240
241 /*
242 * Process hypervisor breakpoints.
243 */
244 uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
245 if (fHyperDr7 & X86_DR7_ENABLED_MASK)
246 {
247 PROCESS_ONE_BP(fHyperDr7, 0);
248 PROCESS_ONE_BP(fHyperDr7, 1);
249 PROCESS_ONE_BP(fHyperDr7, 2);
250 PROCESS_ONE_BP(fHyperDr7, 3);
251 }
252
253 return fExec;
254}
255
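/* Illustrative sketch (hypothetical values, not from the original file): with DR0
   armed as an execution breakpoint and DR1 as a data write breakpoint, i.e.

       fGstDr7 = RT_BIT_32(0)                  // L0: local enable, breakpoint 0
               | (X86_DR7_RW_EO << 16)         // R/W0 (bits 16..17): instruction fetch
               | RT_BIT_32(2)                  // L1: local enable, breakpoint 1
               | (X86_DR7_RW_WO << 20);        // R/W1 (bits 20..21): data write

   the function above would return IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA. */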
256
257/**
258 * Initializes the decoder state.
259 *
260 * iemReInitDecoder is mostly a copy of this function.
261 *
262 * @param pVCpu The cross context virtual CPU structure of the
263 * calling thread.
264 * @param fExecOpts Optional execution flags:
265 * - IEM_F_BYPASS_HANDLERS
266 * - IEM_F_X86_DISREGARD_LOCK
267 */
268DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
269{
270 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
271 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
272 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
280
281 /* Execution state: */
282 uint32_t fExec;
283 pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
284
285 /* Decoder state: */
286 pVCpu->iem.s.enmDefAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
287 pVCpu->iem.s.enmEffAddrMode = fExec & IEM_F_MODE_CPUMODE_MASK;
288 if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
289 {
290 pVCpu->iem.s.enmDefOpSize = fExec & IEM_F_MODE_CPUMODE_MASK; /** @todo check if this is correct... */
291 pVCpu->iem.s.enmEffOpSize = fExec & IEM_F_MODE_CPUMODE_MASK;
292 }
293 else
294 {
295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
297 }
298 pVCpu->iem.s.fPrefixes = 0;
299 pVCpu->iem.s.uRexReg = 0;
300 pVCpu->iem.s.uRexB = 0;
301 pVCpu->iem.s.uRexIndex = 0;
302 pVCpu->iem.s.idxPrefix = 0;
303 pVCpu->iem.s.uVex3rdReg = 0;
304 pVCpu->iem.s.uVexLength = 0;
305 pVCpu->iem.s.fEvexStuff = 0;
306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
307#ifdef IEM_WITH_CODE_TLB
308 pVCpu->iem.s.pbInstrBuf = NULL;
309 pVCpu->iem.s.offInstrNextByte = 0;
310 pVCpu->iem.s.offCurInstrStart = 0;
311# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
312 pVCpu->iem.s.offOpcode = 0;
313# endif
314# ifdef VBOX_STRICT
315 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
316 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
317 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
318 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
319# endif
320#else
321 pVCpu->iem.s.offOpcode = 0;
322 pVCpu->iem.s.cbOpcode = 0;
323#endif
324 pVCpu->iem.s.offModRm = 0;
325 pVCpu->iem.s.cActiveMappings = 0;
326 pVCpu->iem.s.iNextMapping = 0;
327 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
328
329#ifdef DBGFTRACE_ENABLED
330 switch (IEM_GET_CPU_MODE(pVCpu))
331 {
332 case IEMMODE_64BIT:
333 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
334 break;
335 case IEMMODE_32BIT:
336 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
337 break;
338 case IEMMODE_16BIT:
339 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
340 break;
341 }
342#endif
343}
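/* Note (an assumption for illustration, not stated in this file): the direct
   assignments of "fExec & IEM_F_MODE_CPUMODE_MASK" to IEMMODE fields above rely on
   the IEMMODE encoding lining up with the low mode bits of fExec, roughly

       IEMMODE_16BIT == 0, IEMMODE_32BIT == 1, IEMMODE_64BIT == 2

   so masking the execution flags yields the address/operand-size mode directly. */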
344
345
346/**
347 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
348 *
349 * This is mostly a copy of iemInitDecoder.
350 *
351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
352 */
353DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
354{
355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
357 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
358 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
359 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
360 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
361 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
362 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
363 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
364
365 /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
366 AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
367 ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
368
369 IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
370 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
371 pVCpu->iem.s.enmEffAddrMode = enmMode;
372 if (enmMode != IEMMODE_64BIT)
373 {
374 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
375 pVCpu->iem.s.enmEffOpSize = enmMode;
376 }
377 else
378 {
379 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
380 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
381 }
382 pVCpu->iem.s.fPrefixes = 0;
383 pVCpu->iem.s.uRexReg = 0;
384 pVCpu->iem.s.uRexB = 0;
385 pVCpu->iem.s.uRexIndex = 0;
386 pVCpu->iem.s.idxPrefix = 0;
387 pVCpu->iem.s.uVex3rdReg = 0;
388 pVCpu->iem.s.uVexLength = 0;
389 pVCpu->iem.s.fEvexStuff = 0;
390 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
391#ifdef IEM_WITH_CODE_TLB
392 if (pVCpu->iem.s.pbInstrBuf)
393 {
394 uint64_t off = (enmMode == IEMMODE_64BIT
395 ? pVCpu->cpum.GstCtx.rip
396 : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
397 - pVCpu->iem.s.uInstrBufPc;
398 if (off < pVCpu->iem.s.cbInstrBufTotal)
399 {
400 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
401 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
402 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
403 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
404 else
405 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
406 }
407 else
408 {
409 pVCpu->iem.s.pbInstrBuf = NULL;
410 pVCpu->iem.s.offInstrNextByte = 0;
411 pVCpu->iem.s.offCurInstrStart = 0;
412 pVCpu->iem.s.cbInstrBuf = 0;
413 pVCpu->iem.s.cbInstrBufTotal = 0;
414 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
415 }
416 }
417 else
418 {
419 pVCpu->iem.s.offInstrNextByte = 0;
420 pVCpu->iem.s.offCurInstrStart = 0;
421 pVCpu->iem.s.cbInstrBuf = 0;
422 pVCpu->iem.s.cbInstrBufTotal = 0;
423# ifdef VBOX_STRICT
424 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
425# endif
426 }
427# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
428 pVCpu->iem.s.offOpcode = 0;
429# endif
430#else /* !IEM_WITH_CODE_TLB */
431 pVCpu->iem.s.cbOpcode = 0;
432 pVCpu->iem.s.offOpcode = 0;
433#endif /* !IEM_WITH_CODE_TLB */
434 pVCpu->iem.s.offModRm = 0;
435 Assert(pVCpu->iem.s.cActiveMappings == 0);
436 pVCpu->iem.s.iNextMapping = 0;
437 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
438 Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
439
440#ifdef DBGFTRACE_ENABLED
441 switch (enmMode)
442 {
443 case IEMMODE_64BIT:
444 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
445 break;
446 case IEMMODE_32BIT:
447 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
448 break;
449 case IEMMODE_16BIT:
450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
451 break;
452 }
453#endif
454}
455
456
457
458/**
459 * Prefetches opcodes the first time execution is started.
460 *
461 * @returns Strict VBox status code.
462 * @param pVCpu The cross context virtual CPU structure of the
463 * calling thread.
464 * @param fExecOpts Optional execution flags:
465 * - IEM_F_BYPASS_HANDLERS
466 * - IEM_F_X86_DISREGARD_LOCK
467 */
468static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
469{
470 iemInitDecoder(pVCpu, fExecOpts);
471
472#ifndef IEM_WITH_CODE_TLB
473 /*
474 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
475 *
476 * First translate CS:rIP to a physical address.
477 *
478 * Note! The iemOpcodeFetchMoreBytes code depends on this here code to fetch
479 * all relevant bytes from the first page, as it ASSUMES it's only ever
480 * called for dealing with CS.LIM, page crossing and instructions that
481 * are too long.
482 */
483 uint32_t cbToTryRead;
484 RTGCPTR GCPtrPC;
485 if (IEM_IS_64BIT_CODE(pVCpu))
486 {
487 cbToTryRead = GUEST_PAGE_SIZE;
488 GCPtrPC = pVCpu->cpum.GstCtx.rip;
489 if (IEM_IS_CANONICAL(GCPtrPC))
490 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
491 else
492 return iemRaiseGeneralProtectionFault0(pVCpu);
493 }
494 else
495 {
496 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
497 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu), ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
498 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
499 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
500 else
501 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
502 if (cbToTryRead) { /* likely */ }
503 else /* overflowed */
504 {
505 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
506 cbToTryRead = UINT32_MAX;
507 }
508 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
509 Assert(GCPtrPC <= UINT32_MAX);
510 }
511
512 PGMPTWALK Walk;
513 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
514 if (RT_SUCCESS(rc))
515 Assert(Walk.fSucceeded); /* probable. */
516 else
517 {
518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
519# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
520 if (Walk.fFailed & PGM_WALKFAIL_EPT)
521 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
522# endif
523 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, rc);
524 }
525 if ((Walk.fEffective & X86_PTE_US) || IEM_GET_CPL(pVCpu) != 3) { /* likely */ }
526 else
527 {
528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
529# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
530 if (Walk.fFailed & PGM_WALKFAIL_EPT)
531 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
532# endif
533 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
534 }
535 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
536 else
537 {
538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
539# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
540 if (Walk.fFailed & PGM_WALKFAIL_EPT)
541 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
542# endif
543 return iemRaisePageFault(pVCpu, GCPtrPC, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
544 }
545 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
546 /** @todo Check reserved bits and such stuff. PGM is better at doing
547 * that, so do it when implementing the guest virtual address
548 * TLB... */
549
550 /*
551 * Read the bytes at this address.
552 */
553 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
554 if (cbToTryRead > cbLeftOnPage)
555 cbToTryRead = cbLeftOnPage;
556 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
557 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
558
559 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
560 {
561 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
562 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
563 { /* likely */ }
564 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
565 {
566 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
567 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
568 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
569 }
570 else
571 {
572 Log((RT_SUCCESS(rcStrict)
573 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
574 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
575 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
576 return rcStrict;
577 }
578 }
579 else
580 {
581 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
582 if (RT_SUCCESS(rc))
583 { /* likely */ }
584 else
585 {
586 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
587 GCPtrPC, GCPhys, cbToTryRead, rc));
588 return rc;
589 }
590 }
591 pVCpu->iem.s.cbOpcode = cbToTryRead;
592#endif /* !IEM_WITH_CODE_TLB */
593 return VINF_SUCCESS;
594}
595
596
597/**
598 * Invalidates the IEM TLBs.
599 *
600 * This is called internally as well as by PGM when moving GC mappings.
601 *
602 * @param pVCpu The cross context virtual CPU structure of the calling
603 * thread.
604 */
605VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
606{
607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
608 Log10(("IEMTlbInvalidateAll\n"));
609# ifdef IEM_WITH_CODE_TLB
610 pVCpu->iem.s.cbInstrBufTotal = 0;
611 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
612 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
613 { /* very likely */ }
614 else
615 {
616 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
617 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
618 while (i-- > 0)
619 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
620 }
621# endif
622
623# ifdef IEM_WITH_DATA_TLB
624 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
625 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
626 { /* very likely */ }
627 else
628 {
629 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
630 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
631 while (i-- > 0)
632 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
633 }
634# endif
635#else
636 RT_NOREF(pVCpu);
637#endif
638}
639
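/* Conceptual sketch (illustrative, simplified) of the revision trick used above: a
   TLB entry only hits when its tag carries the current revision, so bumping
   uTlbRevision invalidates every entry in O(1) without touching the array:

       fHit = pTlbe->uTag == (IEMTLB_CALC_TAG_NO_REV(GCPtr) | pTlb->uTlbRevision);

   The entries only need to be scrubbed (the while loops above) when the revision
   counter would otherwise wrap back to zero. */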
640
641/**
642 * Invalidates a page in the TLBs.
643 *
644 * @param pVCpu The cross context virtual CPU structure of the calling
645 * thread.
646 * @param GCPtr The address of the page to invalidate
647 * @thread EMT(pVCpu)
648 */
649VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
653 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
654 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
655 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
656
657# ifdef IEM_WITH_CODE_TLB
658 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
659 {
660 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
661 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
662 pVCpu->iem.s.cbInstrBufTotal = 0;
663 }
664# endif
665
666# ifdef IEM_WITH_DATA_TLB
667 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
668 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
669# endif
670#else
671 NOREF(pVCpu); NOREF(GCPtr);
672#endif
673}
674
675
676#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
677/**
678 * Invalidates both TLBs the slow way following a rollover.
679 *
680 * Worker for IEMTlbInvalidateAllPhysical,
681 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
682 * iemMemMapJmp and others.
683 *
684 * @thread EMT(pVCpu)
685 */
686static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
687{
688 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
689 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
690 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
691
692 unsigned i;
693# ifdef IEM_WITH_CODE_TLB
694 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
695 while (i-- > 0)
696 {
697 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
698 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
699 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
700 }
701# endif
702# ifdef IEM_WITH_DATA_TLB
703 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
704 while (i-- > 0)
705 {
706 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
707 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
708 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
709 }
710# endif
711
712}
713#endif
714
715
716/**
717 * Invalidates the host physical aspects of the IEM TLBs.
718 *
719 * This is called internally as well as by PGM when moving GC mappings.
720 *
721 * @param pVCpu The cross context virtual CPU structure of the calling
722 * thread.
723 * @note Currently not used.
724 */
725VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
726{
727#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
728 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
729 Log10(("IEMTlbInvalidateAllPhysical\n"));
730
731# ifdef IEM_WITH_CODE_TLB
732 pVCpu->iem.s.cbInstrBufTotal = 0;
733# endif
734 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
735 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
736 {
737 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
738 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
739 }
740 else
741 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
742#else
743 NOREF(pVCpu);
744#endif
745}
746
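/* Same idea as the virtual-address revision above, but for the host-physical side
   (illustrative summary): entries keep IEMTLBE_F_PHYS_REV inside fFlagsAndPhysRev,
   and code paths like iemOpcodeFetchBytesJmp only trust pbMappingR3/GCPhys while

       (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev

   holds; bumping uTlbPhysRev therefore forces a PGMPhysIemGCPhys2PtrNoLock
   re-lookup on the next use of each entry. */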
747
748/**
749 * Invalidates the host physical aspects of the IEM TLBs.
750 *
751 * This is called internally as well as by PGM when moving GC mappings.
752 *
753 * @param pVM The cross context VM structure.
754 * @param idCpuCaller The ID of the calling EMT if available to the caller,
755 * otherwise NIL_VMCPUID.
756 *
757 * @remarks Caller holds the PGM lock.
758 */
759VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
760{
761#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
762 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
763 if (pVCpuCaller)
764 VMCPU_ASSERT_EMT(pVCpuCaller);
765 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
766
767 VMCC_FOR_EACH_VMCPU(pVM)
768 {
769# ifdef IEM_WITH_CODE_TLB
770 if (pVCpuCaller == pVCpu)
771 pVCpu->iem.s.cbInstrBufTotal = 0;
772# endif
773
774 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
775 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
776 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
777 { /* likely */}
778 else if (pVCpuCaller == pVCpu)
779 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
780 else
781 {
782 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
783 continue;
784 }
785 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
786 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
787 }
788 VMCC_FOR_EACH_VMCPU_END(pVM);
789
790#else
791 RT_NOREF(pVM, idCpuCaller);
792#endif
793}
794
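/* Rationale sketch for the compare-exchange above (an assumption, not stated in the
   original): another EMT may be bumping its own uTlbPhysRev concurrently, so the
   update must only land if the value is still the one uTlbPhysRevNew was derived
   from; losing the race is harmless because the concurrent bump already invalidated
   the physical mappings. */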
795
796/**
797 * Flushes the prefetch buffer, light version.
798 */
799void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr)
800{
801#ifndef IEM_WITH_CODE_TLB
802 pVCpu->iem.s.cbOpcode = cbInstr;
803#else
804 RT_NOREF(pVCpu, cbInstr);
805#endif
806}
807
808
809/**
810 * Flushes the prefetch buffer, heavy version.
811 */
812void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr)
813{
814#ifndef IEM_WITH_CODE_TLB
815 pVCpu->iem.s.cbOpcode = cbInstr; /* Note! SVM and VT-x may set this to zero on exit, rather than the instruction length. */
816#elif 1
817 pVCpu->iem.s.pbInstrBuf = NULL;
818 pVCpu->iem.s.cbInstrBufTotal = 0;
819 RT_NOREF(cbInstr);
820#else
821 RT_NOREF(pVCpu, cbInstr);
822#endif
823}
824
825
826
827#ifdef IEM_WITH_CODE_TLB
828
829/**
830 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
831 * failure and jumping.
832 *
833 * We end up here for a number of reasons:
834 * - pbInstrBuf isn't yet initialized.
835 * - Advancing beyond the buffer boundary (e.g. cross page).
836 * - Advancing beyond the CS segment limit.
837 * - Fetching from non-mappable page (e.g. MMIO).
838 *
839 * @param pVCpu The cross context virtual CPU structure of the
840 * calling thread.
841 * @param pvDst Where to return the bytes.
842 * @param cbDst Number of bytes to read. A value of zero is
843 * allowed for initializing pbInstrBuf (the
844 * recompiler does this). In this case it is best
845 * to set pbInstrBuf to NULL prior to the call.
846 */
847void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP
848{
849# ifdef IN_RING3
850 for (;;)
851 {
852 Assert(cbDst <= 8);
853 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
854
855 /*
856 * We might have a partial buffer match, deal with that first to make the
857 * rest simpler. This is the first part of the cross page/buffer case.
858 */
859 if (pVCpu->iem.s.pbInstrBuf != NULL)
860 {
861 if (offBuf < pVCpu->iem.s.cbInstrBuf)
862 {
863 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
864 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
865 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
866
867 cbDst -= cbCopy;
868 pvDst = (uint8_t *)pvDst + cbCopy;
869 offBuf += cbCopy;
870 pVCpu->iem.s.offInstrNextByte += offBuf;
871 }
872 }
873
874 /*
875 * Check segment limit, figuring how much we're allowed to access at this point.
876 *
877 * We will fault immediately if RIP is past the segment limit / in non-canonical
878 * territory. If we do continue, there are one or more bytes to read before we
879 * end up in trouble and we need to do that first before faulting.
880 */
881 RTGCPTR GCPtrFirst;
882 uint32_t cbMaxRead;
883 if (IEM_IS_64BIT_CODE(pVCpu))
884 {
885 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
886 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
887 { /* likely */ }
888 else
889 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
890 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
891 }
892 else
893 {
894 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
895 /* Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
896 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
897 { /* likely */ }
898 else /** @todo For CPUs older than the 386, we should not necessarily generate \#GP here but wrap around! */
899 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
900 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
901 if (cbMaxRead != 0)
902 { /* likely */ }
903 else
904 {
905 /* Overflowed because address is 0 and limit is max. */
906 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
907 cbMaxRead = X86_PAGE_SIZE;
908 }
909 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
910 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
911 if (cbMaxRead2 < cbMaxRead)
912 cbMaxRead = cbMaxRead2;
913 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
914 }
915
916 /*
917 * Get the TLB entry for this piece of code.
918 */
919 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
920 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
921 if (pTlbe->uTag == uTag)
922 {
923 /* likely when executing lots of code, otherwise unlikely */
924# ifdef VBOX_WITH_STATISTICS
925 pVCpu->iem.s.CodeTlb.cTlbHits++;
926# endif
927 }
928 else
929 {
930 pVCpu->iem.s.CodeTlb.cTlbMisses++;
931 PGMPTWALK Walk;
932 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
933 if (RT_FAILURE(rc))
934 {
935#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
936 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
937 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
938#endif
939 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
940 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, rc);
941 }
942
943 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
944 Assert(Walk.fSucceeded);
945 pTlbe->uTag = uTag;
946 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
947 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
948 pTlbe->GCPhys = Walk.GCPhys;
949 pTlbe->pbMappingR3 = NULL;
950 }
951
952 /*
953 * Check TLB page table level access flags.
954 */
955 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
956 {
957 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && IEM_GET_CPL(pVCpu) == 3)
958 {
959 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
960 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
961 }
962 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
963 {
964 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
965 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
966 }
967 }
968
969 /*
970 * Look up the physical page info if necessary.
971 */
972 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
973 { /* not necessary */ }
974 else
975 {
976 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
977 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
978 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
979 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
980 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
981 { /* likely */ }
982 else
983 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
984 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
985 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
986 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
987 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
988 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
989 }
990
991# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
992 /*
993 * Try do a direct read using the pbMappingR3 pointer.
994 */
995 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
996 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
997 {
998 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
999 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1000 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1001 {
1002 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1003 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1004 }
1005 else
1006 {
1007 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1008 if (cbInstr + (uint32_t)cbDst <= 15)
1009 {
1010 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1011 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1012 }
1013 else
1014 {
1015 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1016 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1017 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1018 }
1019 }
1020 if (cbDst <= cbMaxRead)
1021 {
1022 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0; /** @todo Spurious load effect on branch handling? */
1023 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1024
1025 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1026 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1027 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1028 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1029 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1030 return;
1031 }
1032 pVCpu->iem.s.pbInstrBuf = NULL;
1033
1034 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1035 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1036 }
1037# else
1038# error "refactor as needed"
1039 /*
1040 * If there is no special read handling, we can read a bit more and
1041 * put it in the prefetch buffer.
1042 */
1043 if ( cbDst < cbMaxRead
1044 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1045 {
1046 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1047 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1048 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1049 { /* likely */ }
1050 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1051 {
1052 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1053 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1054 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1055 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICRC_VAL(rcStrict)));
1056 }
1057 else
1058 {
1059 Log((RT_SUCCESS(rcStrict)
1060 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1061 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1062 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1063 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1064 }
1065 }
1066# endif
1067 /*
1068 * Special read handling, so only read exactly what's needed.
1069 * This is a highly unlikely scenario.
1070 */
1071 else
1072 {
1073 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1074
1075 /* Check instruction length. */
1076 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1077 if (RT_LIKELY(cbInstr + cbDst <= 15))
1078 { /* likely */ }
1079 else
1080 {
1081 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0) [slow]\n",
1082 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, cbDst));
1083 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1084 }
1085
1086 /* Do the reading. */
1087 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1088 if (cbToRead > 0)
1089 {
1090 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1091 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1092 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1093 { /* likely */ }
1094 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1095 {
1096 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1097 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1098 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1099 AssertStmt(rcStrict == VINF_SUCCESS, IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)));
1100 }
1101 else
1102 {
1103 Log((RT_SUCCESS(rcStrict)
1104 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1105 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1106 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1107 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1108 }
1109 }
1110
1111 /* Update the state and probably return. */
1112 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1113 pVCpu->iem.s.fTbCrossedPage |= offPg == 0 || pVCpu->iem.s.fTbBranched != 0;
1114 pVCpu->iem.s.GCPhysInstrBufPrev = pVCpu->iem.s.GCPhysInstrBuf;
1115
1116 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1117 pVCpu->iem.s.offInstrNextByte = offPg + cbInstr + cbToRead;
1118 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead + cbInstr) - cbToRead - cbInstr;
1119 pVCpu->iem.s.cbInstrBufTotal = X86_PAGE_SIZE; /** @todo ??? */
1120 pVCpu->iem.s.GCPhysInstrBuf = pTlbe->GCPhys;
1121 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1122 pVCpu->iem.s.pbInstrBuf = NULL;
1123 if (cbToRead == cbDst)
1124 return;
1125 }
1126
1127 /*
1128 * More to read, loop.
1129 */
1130 cbDst -= cbMaxRead;
1131 pvDst = (uint8_t *)pvDst + cbMaxRead;
1132 }
1133# else /* !IN_RING3 */
1134 RT_NOREF(pvDst, cbDst);
1135 if (pvDst || cbDst)
1136 IEM_DO_LONGJMP(pVCpu, VERR_INTERNAL_ERROR);
1137# endif /* !IN_RING3 */
1138}
1139
1140#else /* !IEM_WITH_CODE_TLB */
1141
1142/**
1143 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1144 * exception if it fails.
1145 *
1146 * @returns Strict VBox status code.
1147 * @param pVCpu The cross context virtual CPU structure of the
1148 * calling thread.
1149 * @param cbMin The minimum number of bytes relative to offOpcode
1150 * that must be read.
1151 */
1152VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
1153{
1154 /*
1155 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1156 *
1157 * First translate CS:rIP to a physical address.
1158 */
1159 uint8_t const cbOpcode = pVCpu->iem.s.cbOpcode;
1160 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1161 uint8_t const cbLeft = cbOpcode - offOpcode;
1162 Assert(cbLeft < cbMin);
1163 Assert(cbOpcode <= sizeof(pVCpu->iem.s.abOpcode));
1164
1165 uint32_t cbToTryRead;
1166 RTGCPTR GCPtrNext;
1167 if (IEM_IS_64BIT_CODE(pVCpu))
1168 {
1169 GCPtrNext = pVCpu->cpum.GstCtx.rip + cbOpcode;
1170 if (!IEM_IS_CANONICAL(GCPtrNext))
1171 return iemRaiseGeneralProtectionFault0(pVCpu);
1172 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1173 }
1174 else
1175 {
1176 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1177 /* Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || IEM_IS_32BIT_CODE(pVCpu)); - this is allowed */
1178 GCPtrNext32 += cbOpcode;
1179 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1180 /** @todo For CPUs older than the 386, we should not generate \#GP here but wrap around! */
1181 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1182 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1183 if (!cbToTryRead) /* overflowed */
1184 {
1185 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1186 cbToTryRead = UINT32_MAX;
1187 /** @todo check out wrapping around the code segment. */
1188 }
1189 if (cbToTryRead < cbMin - cbLeft)
1190 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1191 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1192
1193 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1194 if (cbToTryRead > cbLeftOnPage)
1195 cbToTryRead = cbLeftOnPage;
1196 }
1197
1198 /* Restrict to opcode buffer space.
1199
1200 We're making ASSUMPTIONS here based on work done previously in
1201 iemInitDecoderAndPrefetchOpcodes, where bytes from the first page will
1202 be fetched in case of an instruction crossing two pages. */
1203 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - cbOpcode)
1204 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - cbOpcode;
1205 if (RT_LIKELY(cbToTryRead + cbLeft >= cbMin))
1206 { /* likely */ }
1207 else
1208 {
1209 Log(("iemOpcodeFetchMoreBytes: %04x:%08RX64 LB %#x + %#zx -> #GP(0)\n",
1210 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, offOpcode, cbMin));
1211 return iemRaiseGeneralProtectionFault0(pVCpu);
1212 }
1213
1214 PGMPTWALK Walk;
1215 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1216 if (RT_FAILURE(rc))
1217 {
1218 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1219#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1220 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1221 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1222#endif
1223 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, rc);
1224 }
1225 if (!(Walk.fEffective & X86_PTE_US) && IEM_GET_CPL(pVCpu) == 3)
1226 {
1227 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1228#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1229 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1230 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1231#endif
1232 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1233 }
1234 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1235 {
1236 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1237#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1238 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1239 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1240#endif
1241 return iemRaisePageFault(pVCpu, GCPtrNext, 1, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1242 }
1243 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1244 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, cbOpcode));
1245 /** @todo Check reserved bits and such stuff. PGM is better at doing
1246 * that, so do it when implementing the guest virtual address
1247 * TLB... */
1248
1249 /*
1250 * Read the bytes at this address.
1251 *
1252 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1253 * and since PATM should only patch the start of an instruction there
1254 * should be no need to check again here.
1255 */
1256 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
1257 {
1258 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[cbOpcode],
1259 cbToTryRead, PGMACCESSORIGIN_IEM);
1260 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1261 { /* likely */ }
1262 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1263 {
1264 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1265 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1266 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1267 }
1268 else
1269 {
1270 Log((RT_SUCCESS(rcStrict)
1271 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1272 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1273 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1274 return rcStrict;
1275 }
1276 }
1277 else
1278 {
1279 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[cbOpcode], GCPhys, cbToTryRead);
1280 if (RT_SUCCESS(rc))
1281 { /* likely */ }
1282 else
1283 {
1284 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1285 return rc;
1286 }
1287 }
1288 pVCpu->iem.s.cbOpcode = cbOpcode + cbToTryRead;
1289 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1290
1291 return VINF_SUCCESS;
1292}
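/* Buffer bookkeeping assumed by this non-TLB opcode path (summary; the buffer size
   itself lives in IEMInternal.h and is an assumption here):

       0 <= offOpcode <= cbOpcode <= sizeof(abOpcode)

   The iemOpcodeGetNextXxx helpers consume bytes at offOpcode while this function
   appends at cbOpcode, and the #GP(0) branch above fires when even a full read
   could not satisfy cbMin within the remaining abOpcode space. */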
1293
1294#endif /* !IEM_WITH_CODE_TLB */
1295#ifndef IEM_WITH_SETJMP
1296
1297/**
1298 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1299 *
1300 * @returns Strict VBox status code.
1301 * @param pVCpu The cross context virtual CPU structure of the
1302 * calling thread.
1303 * @param pb Where to return the opcode byte.
1304 */
1305VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1306{
1307 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1308 if (rcStrict == VINF_SUCCESS)
1309 {
1310 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1311 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1312 pVCpu->iem.s.offOpcode = offOpcode + 1;
1313 }
1314 else
1315 *pb = 0;
1316 return rcStrict;
1317}
1318
1319#else /* IEM_WITH_SETJMP */
1320
1321/**
1322 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1323 *
1324 * @returns The opcode byte.
1325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1326 */
1327uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1328{
1329# ifdef IEM_WITH_CODE_TLB
1330 uint8_t u8;
1331 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1332 return u8;
1333# else
1334 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1335 if (rcStrict == VINF_SUCCESS)
1336 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1337 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1338# endif
1339}
1340
1341#endif /* IEM_WITH_SETJMP */
1342
1343#ifndef IEM_WITH_SETJMP
1344
1345/**
1346 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1347 *
1348 * @returns Strict VBox status code.
1349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1350 * @param pu16 Where to return the opcode word.
1351 */
1352VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1353{
1354 uint8_t u8;
1355 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1356 if (rcStrict == VINF_SUCCESS)
1357 *pu16 = (int8_t)u8;
1358 return rcStrict;
1359}
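/* Why the (int8_t) cast above sign-extends (illustrative, plain C/C++ conversion
   behaviour rather than anything IEM specific): the signed byte is converted to
   the wider unsigned destination via its signed value, e.g.

       uint16_t u16 = (int8_t)0xFE;   // -2 -> 0xFFFE
       uint32_t u32 = (int8_t)0xFE;   // -2 -> 0xFFFFFFFE

   which is exactly the S8Sx (sign-extend) semantics the helper names promise. */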
1360
1361
1362/**
1363 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1364 *
1365 * @returns Strict VBox status code.
1366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1367 * @param pu32 Where to return the opcode dword.
1368 */
1369VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1370{
1371 uint8_t u8;
1372 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1373 if (rcStrict == VINF_SUCCESS)
1374 *pu32 = (int8_t)u8;
1375 return rcStrict;
1376}
1377
1378
1379/**
1380 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1381 *
1382 * @returns Strict VBox status code.
1383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1384 * @param pu64 Where to return the opcode qword.
1385 */
1386VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1387{
1388 uint8_t u8;
1389 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1390 if (rcStrict == VINF_SUCCESS)
1391 *pu64 = (int8_t)u8;
1392 return rcStrict;
1393}
1394
1395#endif /* !IEM_WITH_SETJMP */
1396
1397
1398#ifndef IEM_WITH_SETJMP
1399
1400/**
1401 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1402 *
1403 * @returns Strict VBox status code.
1404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1405 * @param pu16 Where to return the opcode word.
1406 */
1407VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1408{
1409 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1410 if (rcStrict == VINF_SUCCESS)
1411 {
1412 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1413# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1414 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1415# else
1416 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1417# endif
1418 pVCpu->iem.s.offOpcode = offOpcode + 2;
1419 }
1420 else
1421 *pu16 = 0;
1422 return rcStrict;
1423}
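/* Note on the two paths above (summary, not new logic): RT_MAKE_U16(lo, hi)
   assembles the value little-endian from individual bytes and therefore works on
   any host, e.g. opcode bytes { 0x34, 0x12 } -> 0x1234, while the
   IEM_USE_UNALIGNED_DATA_ACCESS variant simply reinterprets the buffer, which
   assumes the host tolerates unaligned little-endian loads (true on x86/AMD64). */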
1424
1425#else /* IEM_WITH_SETJMP */
1426
1427/**
1428 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1429 *
1430 * @returns The opcode word.
1431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1432 */
1433uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1434{
1435# ifdef IEM_WITH_CODE_TLB
1436 uint16_t u16;
1437 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1438 return u16;
1439# else
1440 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1441 if (rcStrict == VINF_SUCCESS)
1442 {
1443 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1444 pVCpu->iem.s.offOpcode += 2;
1445# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1446 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1447# else
1448 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1449# endif
1450 }
1451 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1452# endif
1453}
1454
1455#endif /* IEM_WITH_SETJMP */
1456
1457#ifndef IEM_WITH_SETJMP
1458
1459/**
1460 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1461 *
1462 * @returns Strict VBox status code.
1463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1464 * @param pu32 Where to return the opcode double word.
1465 */
1466VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1467{
1468 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1469 if (rcStrict == VINF_SUCCESS)
1470 {
1471 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1472 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1473 pVCpu->iem.s.offOpcode = offOpcode + 2;
1474 }
1475 else
1476 *pu32 = 0;
1477 return rcStrict;
1478}
1479
1480
1481/**
1482 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1483 *
1484 * @returns Strict VBox status code.
1485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1486 * @param pu64 Where to return the opcode quad word.
1487 */
1488VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1489{
1490 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1491 if (rcStrict == VINF_SUCCESS)
1492 {
1493 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1494 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1495 pVCpu->iem.s.offOpcode = offOpcode + 2;
1496 }
1497 else
1498 *pu64 = 0;
1499 return rcStrict;
1500}
1501
1502#endif /* !IEM_WITH_SETJMP */
1503
1504#ifndef IEM_WITH_SETJMP
1505
1506/**
1507 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1508 *
1509 * @returns Strict VBox status code.
1510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1511 * @param pu32 Where to return the opcode dword.
1512 */
1513VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1514{
1515 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1516 if (rcStrict == VINF_SUCCESS)
1517 {
1518 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1519# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1520 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1521# else
1522 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1523 pVCpu->iem.s.abOpcode[offOpcode + 1],
1524 pVCpu->iem.s.abOpcode[offOpcode + 2],
1525 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1526# endif
1527 pVCpu->iem.s.offOpcode = offOpcode + 4;
1528 }
1529 else
1530 *pu32 = 0;
1531 return rcStrict;
1532}
1533
1534#else /* IEM_WITH_SETJMP */
1535
1536/**
1537 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1538 *
1539 * @returns The opcode dword.
1540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1541 */
1542uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1543{
1544# ifdef IEM_WITH_CODE_TLB
1545 uint32_t u32;
1546 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1547 return u32;
1548# else
1549 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1550 if (rcStrict == VINF_SUCCESS)
1551 {
1552 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1553 pVCpu->iem.s.offOpcode = offOpcode + 4;
1554# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1555 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1556# else
1557 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1558 pVCpu->iem.s.abOpcode[offOpcode + 1],
1559 pVCpu->iem.s.abOpcode[offOpcode + 2],
1560 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1561# endif
1562 }
1563 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1564# endif
1565}
1566
1567#endif /* IEM_WITH_SETJMP */
1568
1569#ifndef IEM_WITH_SETJMP
1570
1571/**
1572 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1573 *
1574 * @returns Strict VBox status code.
1575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1576 * @param pu64 Where to return the opcode dword, zero extended to a quad word.
1577 */
1578VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1579{
1580 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1581 if (rcStrict == VINF_SUCCESS)
1582 {
1583 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1584 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1585 pVCpu->iem.s.abOpcode[offOpcode + 1],
1586 pVCpu->iem.s.abOpcode[offOpcode + 2],
1587 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1588 pVCpu->iem.s.offOpcode = offOpcode + 4;
1589 }
1590 else
1591 *pu64 = 0;
1592 return rcStrict;
1593}
1594
1595
1596/**
1597 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1598 *
1599 * @returns Strict VBox status code.
1600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1601 * @param pu64 Where to return the opcode dword, sign extended to a quad word.
1602 */
1603VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1604{
1605 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1606 if (rcStrict == VINF_SUCCESS)
1607 {
1608 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1609 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1610 pVCpu->iem.s.abOpcode[offOpcode + 1],
1611 pVCpu->iem.s.abOpcode[offOpcode + 2],
1612 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1613 pVCpu->iem.s.offOpcode = offOpcode + 4;
1614 }
1615 else
1616 *pu64 = 0;
1617 return rcStrict;
1618}
1619
1620#endif /* !IEM_WITH_SETJMP */
1621
1622#ifndef IEM_WITH_SETJMP
1623
1624/**
1625 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1626 *
1627 * @returns Strict VBox status code.
1628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1629 * @param pu64 Where to return the opcode qword.
1630 */
1631VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1632{
1633 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1634 if (rcStrict == VINF_SUCCESS)
1635 {
1636 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1637# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1638 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1639# else
1640 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1641 pVCpu->iem.s.abOpcode[offOpcode + 1],
1642 pVCpu->iem.s.abOpcode[offOpcode + 2],
1643 pVCpu->iem.s.abOpcode[offOpcode + 3],
1644 pVCpu->iem.s.abOpcode[offOpcode + 4],
1645 pVCpu->iem.s.abOpcode[offOpcode + 5],
1646 pVCpu->iem.s.abOpcode[offOpcode + 6],
1647 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1648# endif
1649 pVCpu->iem.s.offOpcode = offOpcode + 8;
1650 }
1651 else
1652 *pu64 = 0;
1653 return rcStrict;
1654}
1655
1656#else /* IEM_WITH_SETJMP */
1657
1658/**
1659 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1660 *
1661 * @returns The opcode qword.
1662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1663 */
1664uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1665{
1666# ifdef IEM_WITH_CODE_TLB
1667 uint64_t u64;
1668 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1669 return u64;
1670# else
1671 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1672 if (rcStrict == VINF_SUCCESS)
1673 {
1674 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1675 pVCpu->iem.s.offOpcode = offOpcode + 8;
1676# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1677 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1678# else
1679 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1680 pVCpu->iem.s.abOpcode[offOpcode + 1],
1681 pVCpu->iem.s.abOpcode[offOpcode + 2],
1682 pVCpu->iem.s.abOpcode[offOpcode + 3],
1683 pVCpu->iem.s.abOpcode[offOpcode + 4],
1684 pVCpu->iem.s.abOpcode[offOpcode + 5],
1685 pVCpu->iem.s.abOpcode[offOpcode + 6],
1686 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1687# endif
1688 }
1689 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1690# endif
1691}
1692
1693#endif /* IEM_WITH_SETJMP */
1694
1695
1696
1697/** @name Misc Worker Functions.
1698 * @{
1699 */
1700
1701/**
1702 * Gets the exception class for the specified exception vector.
1703 *
1704 * @returns The class of the specified exception.
1705 * @param uVector The exception vector.
1706 */
1707static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1708{
1709 Assert(uVector <= X86_XCPT_LAST);
1710 switch (uVector)
1711 {
1712 case X86_XCPT_DE:
1713 case X86_XCPT_TS:
1714 case X86_XCPT_NP:
1715 case X86_XCPT_SS:
1716 case X86_XCPT_GP:
1717 case X86_XCPT_SX: /* AMD only */
1718 return IEMXCPTCLASS_CONTRIBUTORY;
1719
1720 case X86_XCPT_PF:
1721 case X86_XCPT_VE: /* Intel only */
1722 return IEMXCPTCLASS_PAGE_FAULT;
1723
1724 case X86_XCPT_DF:
1725 return IEMXCPTCLASS_DOUBLE_FAULT;
1726 }
1727 return IEMXCPTCLASS_BENIGN;
1728}
1729
1730
1731/**
1732 * Evaluates how to handle an exception caused during delivery of another event
1733 * (exception / interrupt).
1734 *
1735 * @returns How to handle the recursive exception.
1736 * @param pVCpu The cross context virtual CPU structure of the
1737 * calling thread.
1738 * @param fPrevFlags The flags of the previous event.
1739 * @param uPrevVector The vector of the previous event.
1740 * @param fCurFlags The flags of the current exception.
1741 * @param uCurVector The vector of the current exception.
1742 * @param pfXcptRaiseInfo Where to store additional information about the
1743 * exception condition. Optional.
1744 */
1745VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1746 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1747{
1748 /*
1749 * Only CPU exceptions can be raised while delivering other events; software interrupt
1750 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1751 */
1752 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1753 Assert(pVCpu); RT_NOREF(pVCpu);
1754 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1755
1756 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1757 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1758 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1759 {
1760 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1761 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1762 {
1763 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1764 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1765 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1766 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1767 {
1768 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1769 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1770 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1771 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1772 uCurVector, pVCpu->cpum.GstCtx.cr2));
1773 }
1774 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1775 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1776 {
1777 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1778 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1779 }
1780 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1781 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1782 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1783 {
1784 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1785 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1786 }
1787 }
1788 else
1789 {
1790 if (uPrevVector == X86_XCPT_NMI)
1791 {
1792 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1793 if (uCurVector == X86_XCPT_PF)
1794 {
1795 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1796 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1797 }
1798 }
1799 else if ( uPrevVector == X86_XCPT_AC
1800 && uCurVector == X86_XCPT_AC)
1801 {
1802 enmRaise = IEMXCPTRAISE_CPU_HANG;
1803 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1804 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1805 }
1806 }
1807 }
1808 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1809 {
1810 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1811 if (uCurVector == X86_XCPT_PF)
1812 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1813 }
1814 else
1815 {
1816 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1817 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1818 }
1819
1820 if (pfXcptRaiseInfo)
1821 *pfXcptRaiseInfo = fRaiseInfo;
1822 return enmRaise;
1823}
1824
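/*
 * Worked sketch of the classification above (illustration only, derived from
 * the code in IEMEvaluateRecursiveXcpt and iemGetXcptClass):
 *      new #PF while delivering #PF  -> IEMXCPTRAISE_DOUBLE_FAULT + IEMXCPTRAISEINFO_PF_PF
 *      new #GP while delivering #PF  -> IEMXCPTRAISE_DOUBLE_FAULT + IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT
 *      new #NP while delivering #GP  -> IEMXCPTRAISE_DOUBLE_FAULT (contributory on contributory)
 *      new #PF while delivering #DE  -> IEMXCPTRAISE_CURRENT_XCPT (handled serially, no #DF)
 *      new #GP while delivering #DF  -> IEMXCPTRAISE_TRIPLE_FAULT
 */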
1825
1826/**
1827 * Enters the CPU shutdown state initiated by a triple fault or other
1828 * unrecoverable conditions.
1829 *
1830 * @returns Strict VBox status code.
1831 * @param pVCpu The cross context virtual CPU structure of the
1832 * calling thread.
1833 */
1834static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1835{
1836 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1837 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1838
1839 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1840 {
1841 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1842 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1843 }
1844
1845 RT_NOREF(pVCpu);
1846 return VINF_EM_TRIPLE_FAULT;
1847}
1848
1849
1850/**
1851 * Validates a new SS segment.
1852 *
1853 * @returns VBox strict status code.
1854 * @param pVCpu The cross context virtual CPU structure of the
1855 * calling thread.
1856 * @param NewSS The new SS selector.
1857 * @param uCpl The CPL to load the stack for.
1858 * @param pDesc Where to return the descriptor.
1859 */
1860static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1861{
1862 /* Null selectors are not allowed (we're not called for dispatching
1863 interrupts with SS=0 in long mode). */
1864 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1865 {
1866 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1867 return iemRaiseTaskSwitchFault0(pVCpu);
1868 }
1869
1870 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1871 if ((NewSS & X86_SEL_RPL) != uCpl)
1872 {
1873 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1874 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1875 }
1876
1877 /*
1878 * Read the descriptor.
1879 */
1880 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1881 if (rcStrict != VINF_SUCCESS)
1882 return rcStrict;
1883
1884 /*
1885 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1886 */
1887 if (!pDesc->Legacy.Gen.u1DescType)
1888 {
1889 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1890 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1891 }
1892
1893 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1894 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1895 {
1896 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1897 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1898 }
1899 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1900 {
1901 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1902 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1903 }
1904
1905 /* Is it there? */
1906 /** @todo testcase: Is this checked before the canonical / limit check below? */
1907 if (!pDesc->Legacy.Gen.u1Present)
1908 {
1909 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1910 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1911 }
1912
1913 return VINF_SUCCESS;
1914}
1915
1916/** @} */
1917
1918
1919/** @name Raising Exceptions.
1920 *
1921 * @{
1922 */
1923
1924
1925/**
1926 * Loads the specified stack far pointer from the TSS.
1927 *
1928 * @returns VBox strict status code.
1929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1930 * @param uCpl The CPL to load the stack for.
1931 * @param pSelSS Where to return the new stack segment.
1932 * @param puEsp Where to return the new stack pointer.
1933 */
1934static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1935{
1936 VBOXSTRICTRC rcStrict;
1937 Assert(uCpl < 4);
1938
1939 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1940 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1941 {
1942 /*
1943 * 16-bit TSS (X86TSS16).
1944 */
1945 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1946 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1947 {
1948 uint32_t off = uCpl * 4 + 2;
1949 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1950 {
1951 /** @todo check actual access pattern here. */
1952 uint32_t u32Tmp = 0; /* gcc maybe... */
1953 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1954 if (rcStrict == VINF_SUCCESS)
1955 {
1956 *puEsp = RT_LOWORD(u32Tmp);
1957 *pSelSS = RT_HIWORD(u32Tmp);
1958 return VINF_SUCCESS;
1959 }
1960 }
1961 else
1962 {
1963 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1964 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1965 }
1966 break;
1967 }
1968
1969 /*
1970 * 32-bit TSS (X86TSS32).
1971 */
1972 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1973 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1974 {
1975 uint32_t off = uCpl * 8 + 4;
1976 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1977 {
1978 /** @todo check actual access pattern here. */
1979 uint64_t u64Tmp;
1980 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1981 if (rcStrict == VINF_SUCCESS)
1982 {
1983 *puEsp = u64Tmp & UINT32_MAX;
1984 *pSelSS = (RTSEL)(u64Tmp >> 32);
1985 return VINF_SUCCESS;
1986 }
1987 }
1988 else
1989 {
1990 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1991 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1992 }
1993 break;
1994 }
1995
1996 default:
1997 AssertFailed();
1998 rcStrict = VERR_IEM_IPE_4;
1999 break;
2000 }
2001
2002 *puEsp = 0; /* make gcc happy */
2003 *pSelSS = 0; /* make gcc happy */
2004 return rcStrict;
2005}
2006
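/*
 * Offset arithmetic sketch for the lookup above (assumes the standard
 * X86TSS16/X86TSS32 layouts; illustration only):
 *      16-bit TSS: off = uCpl * 4 + 2  ->  uCpl=0: 0x02 (sp0), uCpl=1: 0x06 (sp1), uCpl=2: 0x0a (sp2)
 *      32-bit TSS: off = uCpl * 8 + 4  ->  uCpl=0: 0x04 (esp0), uCpl=1: 0x0c (esp1), uCpl=2: 0x14 (esp2)
 * A single fetch then returns the stack pointer in the low half and the SS
 * selector in the high half, exactly as split out above.
 */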
2007
2008/**
2009 * Loads the specified stack pointer from the 64-bit TSS.
2010 *
2011 * @returns VBox strict status code.
2012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2013 * @param uCpl The CPL to load the stack for.
2014 * @param uIst The interrupt stack table index, 0 if to use uCpl.
2015 * @param puRsp Where to return the new stack pointer.
2016 */
2017static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
2018{
2019 Assert(uCpl < 4);
2020 Assert(uIst < 8);
2021 *puRsp = 0; /* make gcc happy */
2022
2023 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2024 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
2025
2026 uint32_t off;
2027 if (uIst)
2028 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
2029 else
2030 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
2031 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
2032 {
2033 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
2034 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
2035 }
2036
2037 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
2038}
2039
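/*
 * Offset arithmetic sketch for the 64-bit TSS lookup above (assumes the
 * standard X86TSS64 layout; illustration only):
 *      uIst == 0: off = uCpl * 8 + RT_UOFFSETOF(X86TSS64, rsp0)        ->  uCpl=0: 0x04 (rsp0), uCpl=2: 0x14 (rsp2)
 *      uIst != 0: off = (uIst - 1) * 8 + RT_UOFFSETOF(X86TSS64, ist1)  ->  uIst=1: 0x24 (ist1), uIst=7: 0x54 (ist7)
 * The 8-byte read must fit entirely within TR.limit or #TS is raised, as
 * checked above.
 */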
2040
2041/**
2042 * Adjusts the CPU state according to the exception being raised.
2043 *
2044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2045 * @param u8Vector The exception that has been raised.
2046 */
2047DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
2048{
2049 switch (u8Vector)
2050 {
2051 case X86_XCPT_DB:
2052 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
2053 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2054 break;
2055 /** @todo Read the AMD and Intel exception reference... */
2056 }
2057}
2058
2059
2060/**
2061 * Implements exceptions and interrupts for real mode.
2062 *
2063 * @returns VBox strict status code.
2064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2065 * @param cbInstr The number of bytes to offset rIP by in the return
2066 * address.
2067 * @param u8Vector The interrupt / exception vector number.
2068 * @param fFlags The flags.
2069 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2070 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2071 */
2072static VBOXSTRICTRC
2073iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
2074 uint8_t cbInstr,
2075 uint8_t u8Vector,
2076 uint32_t fFlags,
2077 uint16_t uErr,
2078 uint64_t uCr2) RT_NOEXCEPT
2079{
2080 NOREF(uErr); NOREF(uCr2);
2081 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2082
2083 /*
2084 * Read the IDT entry.
2085 */
2086 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
2087 {
2088 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2089 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2090 }
2091 RTFAR16 Idte;
2092 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
2093 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2094 {
2095 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2096 return rcStrict;
2097 }
2098
2099 /*
2100 * Push the stack frame.
2101 */
2102 uint16_t *pu16Frame;
2103 uint64_t uNewRsp;
2104 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
2105 if (rcStrict != VINF_SUCCESS)
2106 return rcStrict;
2107
2108 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2109#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2110 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
2111 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
2112 fEfl |= UINT16_C(0xf000);
2113#endif
2114 pu16Frame[2] = (uint16_t)fEfl;
2115 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
2116 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
2117 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
2118 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2119 return rcStrict;
2120
2121 /*
2122 * Load the vector address into cs:ip and make exception specific state
2123 * adjustments.
2124 */
2125 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
2126 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
2127 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2128 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
2129 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2130 pVCpu->cpum.GstCtx.rip = Idte.off;
2131 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
2132 IEMMISC_SET_EFL(pVCpu, fEfl);
2133
2134 /** @todo do we actually do this in real mode? */
2135 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2136 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2137
2138 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK don't really change here,
2139 so it's best to leave them alone in case we're in a weird kind of real mode... */
2140
2141 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2142}
2143
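/*
 * Minimal sketch of the real-mode dispatch implemented above (illustration
 * only): the IVT entry for vector N is a 4-byte IP:CS far pointer at
 * IDTR.base + N * 4, and the frame pushed on the guest stack is
 *      [SP+4] FLAGS, [SP+2] CS, [SP+0] IP (return address)
 * e.g. vector 0x21 with IDTR.base = 0 reads the far pointer at 0x0084 and
 * continues at Idte.sel:Idte.off with CS.base = Idte.sel << 4.
 */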
2144
2145/**
2146 * Loads a NULL data selector into a segment register when coming from V8086 mode.
2147 *
2148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2149 * @param pSReg Pointer to the segment register.
2150 */
2151DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
2152{
2153 pSReg->Sel = 0;
2154 pSReg->ValidSel = 0;
2155 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2156 {
2157 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
2158 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
2159 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
2160 }
2161 else
2162 {
2163 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2164 /** @todo check this on AMD-V */
2165 pSReg->u64Base = 0;
2166 pSReg->u32Limit = 0;
2167 }
2168}
2169
2170
2171/**
2172 * Loads a segment selector during a task switch in V8086 mode.
2173 *
2174 * @param pSReg Pointer to the segment register.
2175 * @param uSel The selector value to load.
2176 */
2177DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
2178{
2179 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
2180 pSReg->Sel = uSel;
2181 pSReg->ValidSel = uSel;
2182 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2183 pSReg->u64Base = uSel << 4;
2184 pSReg->u32Limit = 0xffff;
2185 pSReg->Attr.u = 0xf3;
2186}
2187
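/*
 * Worked example for the V8086 selector load above (illustration only):
 * loading uSel=0x1234 yields Sel=0x1234, u64Base=0x12340, u32Limit=0xffff
 * and Attr=0xf3, i.e. a present, DPL=3, accessed read/write data segment,
 * which matches the fixed V86 segment semantics in the Intel spec cited above.
 */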
2188
2189/**
2190 * Loads a segment selector during a task switch in protected mode.
2191 *
2192 * In this task switch scenario, we would throw \#TS exceptions rather than
2193 * \#GPs.
2194 *
2195 * @returns VBox strict status code.
2196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2197 * @param pSReg Pointer to the segment register.
2198 * @param uSel The new selector value.
2199 *
2200 * @remarks This does _not_ handle CS or SS.
2201 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
2202 */
2203static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2204{
2205 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2206
2207 /* Null data selector. */
2208 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2209 {
2210 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2212 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2213 return VINF_SUCCESS;
2214 }
2215
2216 /* Fetch the descriptor. */
2217 IEMSELDESC Desc;
2218 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2219 if (rcStrict != VINF_SUCCESS)
2220 {
2221 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2222 VBOXSTRICTRC_VAL(rcStrict)));
2223 return rcStrict;
2224 }
2225
2226 /* Must be a data segment or readable code segment. */
2227 if ( !Desc.Legacy.Gen.u1DescType
2228 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2229 {
2230 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2231 Desc.Legacy.Gen.u4Type));
2232 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2233 }
2234
2235 /* Check privileges for data segments and non-conforming code segments. */
2236 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2237 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2238 {
2239 /* The RPL and the new CPL must be less than or equal to the DPL. */
2240 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2241 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
2242 {
2243 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2244 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2245 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2246 }
2247 }
2248
2249 /* Is it there? */
2250 if (!Desc.Legacy.Gen.u1Present)
2251 {
2252 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2253 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2254 }
2255
2256 /* The base and limit. */
2257 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2258 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2259
2260 /*
2261 * Ok, everything checked out fine. Now set the accessed bit before
2262 * committing the result into the registers.
2263 */
2264 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2265 {
2266 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2267 if (rcStrict != VINF_SUCCESS)
2268 return rcStrict;
2269 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2270 }
2271
2272 /* Commit */
2273 pSReg->Sel = uSel;
2274 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2275 pSReg->u32Limit = cbLimit;
2276 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2277 pSReg->ValidSel = uSel;
2278 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2279 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2280 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2281
2282 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2283 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2284 return VINF_SUCCESS;
2285}
2286
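/*
 * Privilege check sketch for the helper above (illustration only): for data
 * and non-conforming code segments both the selector RPL and the CPL must be
 * less than or equal to the descriptor DPL. E.g. uSel with RPL=3 and DPL=2
 * (or CPL=3 and DPL=2) fails and raises #TS with the selector (RPL cleared)
 * as the error code, while conforming code segments skip this check entirely.
 */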
2287
2288/**
2289 * Performs a task switch.
2290 *
2291 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2292 * caller is responsible for performing the necessary checks (like DPL, TSS
2293 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2294 * reference for JMP, CALL, IRET.
2295 *
2296 * If the task switch is due to a software interrupt or hardware exception,
2297 * the caller is responsible for validating the TSS selector and descriptor. See
2298 * Intel Instruction reference for INT n.
2299 *
2300 * @returns VBox strict status code.
2301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2302 * @param enmTaskSwitch The cause of the task switch.
2303 * @param uNextEip The EIP effective after the task switch.
2304 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2305 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2306 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2307 * @param SelTSS The TSS selector of the new task.
2308 * @param pNewDescTSS Pointer to the new TSS descriptor.
2309 */
2310VBOXSTRICTRC
2311iemTaskSwitch(PVMCPUCC pVCpu,
2312 IEMTASKSWITCH enmTaskSwitch,
2313 uint32_t uNextEip,
2314 uint32_t fFlags,
2315 uint16_t uErr,
2316 uint64_t uCr2,
2317 RTSEL SelTSS,
2318 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2319{
2320 Assert(!IEM_IS_REAL_MODE(pVCpu));
2321 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2322 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2323
2324 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2325 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2326 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2327 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2328 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2329
2330 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2331 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2332
2333 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2334 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2335
2336 /* Update CR2 in case it's a page-fault. */
2337 /** @todo This should probably be done much earlier in IEM/PGM. See
2338 * @bugref{5653#c49}. */
2339 if (fFlags & IEM_XCPT_FLAGS_CR2)
2340 pVCpu->cpum.GstCtx.cr2 = uCr2;
2341
2342 /*
2343 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2344 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2345 */
2346 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2347 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2348 if (uNewTSSLimit < uNewTSSLimitMin)
2349 {
2350 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2351 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2352 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2353 }
2354
2355 /*
2356 * Task switches in VMX non-root mode always cause task-switch VM-exits.
2357 * The new TSS must have been read and validated (DPL, limits etc.) before a
2358 * task-switch VM-exit commences.
2359 *
2360 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2361 */
2362 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2363 {
2364 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2365 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2366 }
2367
2368 /*
2369 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2370 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2371 */
2372 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2373 {
2374 uint32_t const uExitInfo1 = SelTSS;
2375 uint32_t uExitInfo2 = uErr;
2376 switch (enmTaskSwitch)
2377 {
2378 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2379 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2380 default: break;
2381 }
2382 if (fFlags & IEM_XCPT_FLAGS_ERR)
2383 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2384 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2385 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2386
2387 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2388 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2389 RT_NOREF2(uExitInfo1, uExitInfo2);
2390 }
2391
2392 /*
2393 * Check the current TSS limit. The last data written to the current TSS during the
2394 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2395 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2396 *
2397 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2398 * end up with smaller than "legal" TSS limits.
2399 */
2400 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2401 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2402 if (uCurTSSLimit < uCurTSSLimitMin)
2403 {
2404 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2405 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2406 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2407 }
2408
2409 /*
2410 * Verify that the new TSS can be accessed and map it. Map only the required contents
2411 * and not the entire TSS.
2412 */
2413 void *pvNewTSS;
2414 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2415 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2416 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2417 /** @todo Handle the case where the TSS crosses a page boundary. Intel specifies that the CPU may
2418 * not perform correct translation if this happens. See Intel spec. 7.2.1
2419 * "Task-State Segment". */
2420 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2421 if (rcStrict != VINF_SUCCESS)
2422 {
2423 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2424 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2425 return rcStrict;
2426 }
2427
2428 /*
2429 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2430 */
2431 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
2432 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2433 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2434 {
2435 PX86DESC pDescCurTSS;
2436 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2437 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2438 if (rcStrict != VINF_SUCCESS)
2439 {
2440 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2441 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2442 return rcStrict;
2443 }
2444
2445 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2446 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2447 if (rcStrict != VINF_SUCCESS)
2448 {
2449 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2450 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2451 return rcStrict;
2452 }
2453
2454 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2455 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2456 {
2457 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2458 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2459 fEFlags &= ~X86_EFL_NT;
2460 }
2461 }
2462
2463 /*
2464 * Save the CPU state into the current TSS.
2465 */
2466 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2467 if (GCPtrNewTSS == GCPtrCurTSS)
2468 {
2469 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2470 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2471 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
2472 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2473 pVCpu->cpum.GstCtx.ldtr.Sel));
2474 }
2475 if (fIsNewTSS386)
2476 {
2477 /*
2478 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2479 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2480 */
2481 void *pvCurTSS32;
2482 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2483 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2484 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2485 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2486 if (rcStrict != VINF_SUCCESS)
2487 {
2488 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2489 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2490 return rcStrict;
2491 }
2492
2493 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2494 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2495 pCurTSS32->eip = uNextEip;
2496 pCurTSS32->eflags = fEFlags;
2497 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2498 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2499 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2500 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2501 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2502 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2503 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2504 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2505 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2506 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2507 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2508 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2509 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2510 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2511
2512 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2513 if (rcStrict != VINF_SUCCESS)
2514 {
2515 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2516 VBOXSTRICTRC_VAL(rcStrict)));
2517 return rcStrict;
2518 }
2519 }
2520 else
2521 {
2522 /*
2523 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2524 */
2525 void *pvCurTSS16;
2526 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2527 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2528 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2529 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2530 if (rcStrict != VINF_SUCCESS)
2531 {
2532 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2533 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2534 return rcStrict;
2535 }
2536
2537 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2538 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2539 pCurTSS16->ip = uNextEip;
2540 pCurTSS16->flags = (uint16_t)fEFlags;
2541 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2542 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2543 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2544 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2545 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2546 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2547 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2548 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2549 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2550 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2551 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2552 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2553
2554 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2555 if (rcStrict != VINF_SUCCESS)
2556 {
2557 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2558 VBOXSTRICTRC_VAL(rcStrict)));
2559 return rcStrict;
2560 }
2561 }
2562
2563 /*
2564 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2565 */
2566 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2567 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2568 {
2569 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2570 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2571 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2572 }
2573
2574 /*
2575 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2576 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2577 */
2578 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2579 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2580 bool fNewDebugTrap;
2581 if (fIsNewTSS386)
2582 {
2583 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2584 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2585 uNewEip = pNewTSS32->eip;
2586 uNewEflags = pNewTSS32->eflags;
2587 uNewEax = pNewTSS32->eax;
2588 uNewEcx = pNewTSS32->ecx;
2589 uNewEdx = pNewTSS32->edx;
2590 uNewEbx = pNewTSS32->ebx;
2591 uNewEsp = pNewTSS32->esp;
2592 uNewEbp = pNewTSS32->ebp;
2593 uNewEsi = pNewTSS32->esi;
2594 uNewEdi = pNewTSS32->edi;
2595 uNewES = pNewTSS32->es;
2596 uNewCS = pNewTSS32->cs;
2597 uNewSS = pNewTSS32->ss;
2598 uNewDS = pNewTSS32->ds;
2599 uNewFS = pNewTSS32->fs;
2600 uNewGS = pNewTSS32->gs;
2601 uNewLdt = pNewTSS32->selLdt;
2602 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2603 }
2604 else
2605 {
2606 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2607 uNewCr3 = 0;
2608 uNewEip = pNewTSS16->ip;
2609 uNewEflags = pNewTSS16->flags;
2610 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2611 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2612 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2613 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2614 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2615 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2616 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2617 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2618 uNewES = pNewTSS16->es;
2619 uNewCS = pNewTSS16->cs;
2620 uNewSS = pNewTSS16->ss;
2621 uNewDS = pNewTSS16->ds;
2622 uNewFS = 0;
2623 uNewGS = 0;
2624 uNewLdt = pNewTSS16->selLdt;
2625 fNewDebugTrap = false;
2626 }
2627
2628 if (GCPtrNewTSS == GCPtrCurTSS)
2629 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2630 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2631
2632 /*
2633 * We're done accessing the new TSS.
2634 */
2635 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2636 if (rcStrict != VINF_SUCCESS)
2637 {
2638 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2639 return rcStrict;
2640 }
2641
2642 /*
2643 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2644 */
2645 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2646 {
2647 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2648 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2649 if (rcStrict != VINF_SUCCESS)
2650 {
2651 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2652 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2653 return rcStrict;
2654 }
2655
2656 /* Check that the descriptor indicates the new TSS is available (not busy). */
2657 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2658 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2659 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2660
2661 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2662 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2663 if (rcStrict != VINF_SUCCESS)
2664 {
2665 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2666 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2667 return rcStrict;
2668 }
2669 }
2670
2671 /*
2672 * From this point on, we're technically in the new task. Exceptions raised here are deferred
2673 * until the task switch completes, but are delivered before any instruction in the new task executes.
2674 */
2675 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2676 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2677 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2678 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2679 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2680 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2681 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2682
2683 /* Set the busy bit in TR. */
2684 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2685
2686 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2687 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2688 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2689 {
2690 uNewEflags |= X86_EFL_NT;
2691 }
2692
2693 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2694 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2695 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2696
2697 pVCpu->cpum.GstCtx.eip = uNewEip;
2698 pVCpu->cpum.GstCtx.eax = uNewEax;
2699 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2700 pVCpu->cpum.GstCtx.edx = uNewEdx;
2701 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2702 pVCpu->cpum.GstCtx.esp = uNewEsp;
2703 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2704 pVCpu->cpum.GstCtx.esi = uNewEsi;
2705 pVCpu->cpum.GstCtx.edi = uNewEdi;
2706
2707 uNewEflags &= X86_EFL_LIVE_MASK;
2708 uNewEflags |= X86_EFL_RA1_MASK;
2709 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2710
2711 /*
2712 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2713 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2714 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2715 */
2716 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2717 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2718
2719 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2720 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2721
2722 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2723 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2724
2725 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2726 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2727
2728 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2729 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2730
2731 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2732 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2733 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2734
2735 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2736 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2737 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2738 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2739
2740 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2741 {
2742 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2743 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2744 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2745 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2746 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2747 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2748 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2749 }
2750
2751 /*
2752 * Switch CR3 for the new task.
2753 */
2754 if ( fIsNewTSS386
2755 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2756 {
2757 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2758 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2759 AssertRCSuccessReturn(rc, rc);
2760
2761 /* Inform PGM. */
2762 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2763 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2764 AssertRCReturn(rc, rc);
2765 /* ignore informational status codes */
2766
2767 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2768 }
2769
2770 /*
2771 * Switch LDTR for the new task.
2772 */
2773 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2774 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2775 else
2776 {
2777 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2778
2779 IEMSELDESC DescNewLdt;
2780 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2781 if (rcStrict != VINF_SUCCESS)
2782 {
2783 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2784 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2785 return rcStrict;
2786 }
2787 if ( !DescNewLdt.Legacy.Gen.u1Present
2788 || DescNewLdt.Legacy.Gen.u1DescType
2789 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2790 {
2791 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2792 uNewLdt, DescNewLdt.Legacy.u));
2793 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2794 }
2795
2796 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2797 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2798 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2799 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2800 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2801 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2802 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2804 }
2805
2806 IEMSELDESC DescSS;
2807 if (IEM_IS_V86_MODE(pVCpu))
2808 {
2809 IEM_SET_CPL(pVCpu, 3);
2810 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2811 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2812 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2813 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2814 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2815 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2816
2817 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2818 DescSS.Legacy.u = 0;
2819 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2820 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2821 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2822 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2823 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2824 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2825 DescSS.Legacy.Gen.u2Dpl = 3;
2826 }
2827 else
2828 {
2829 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2830
2831 /*
2832 * Load the stack segment for the new task.
2833 */
2834 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2835 {
2836 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2837 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2838 }
2839
2840 /* Fetch the descriptor. */
2841 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2842 if (rcStrict != VINF_SUCCESS)
2843 {
2844 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2845 VBOXSTRICTRC_VAL(rcStrict)));
2846 return rcStrict;
2847 }
2848
2849 /* SS must be a data segment and writable. */
2850 if ( !DescSS.Legacy.Gen.u1DescType
2851 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2852 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2853 {
2854 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2855 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2856 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2857 }
2858
2859 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2860 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2861 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2862 {
2863 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2864 uNewCpl));
2865 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2866 }
2867
2868 /* Is it there? */
2869 if (!DescSS.Legacy.Gen.u1Present)
2870 {
2871 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2872 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2873 }
2874
2875 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2876 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2877
2878 /* Set the accessed bit before committing the result into SS. */
2879 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2880 {
2881 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2882 if (rcStrict != VINF_SUCCESS)
2883 return rcStrict;
2884 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2885 }
2886
2887 /* Commit SS. */
2888 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2889 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2890 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2891 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2892 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2893 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2894 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2895
2896 /* CPL has changed, update IEM before loading rest of segments. */
2897 IEM_SET_CPL(pVCpu, uNewCpl);
2898
2899 /*
2900 * Load the data segments for the new task.
2901 */
2902 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2903 if (rcStrict != VINF_SUCCESS)
2904 return rcStrict;
2905 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2906 if (rcStrict != VINF_SUCCESS)
2907 return rcStrict;
2908 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2909 if (rcStrict != VINF_SUCCESS)
2910 return rcStrict;
2911 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2912 if (rcStrict != VINF_SUCCESS)
2913 return rcStrict;
2914
2915 /*
2916 * Load the code segment for the new task.
2917 */
2918 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2919 {
2920 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2921 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2922 }
2923
2924 /* Fetch the descriptor. */
2925 IEMSELDESC DescCS;
2926 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2927 if (rcStrict != VINF_SUCCESS)
2928 {
2929 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2930 return rcStrict;
2931 }
2932
2933 /* CS must be a code segment. */
2934 if ( !DescCS.Legacy.Gen.u1DescType
2935 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2936 {
2937 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2938 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2939 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2940 }
2941
2942 /* For conforming CS, DPL must be less than or equal to the RPL. */
2943 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2944 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2945 {
2946 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2947 DescCS.Legacy.Gen.u2Dpl));
2948 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2949 }
2950
2951 /* For non-conforming CS, DPL must match RPL. */
2952 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2953 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2954 {
2955 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2956 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2957 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2958 }
2959
2960 /* Is it there? */
2961 if (!DescCS.Legacy.Gen.u1Present)
2962 {
2963 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2964 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2965 }
2966
2967 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2968 u64Base = X86DESC_BASE(&DescCS.Legacy);
2969
2970 /* Set the accessed bit before committing the result into CS. */
2971 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2972 {
2973 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2974 if (rcStrict != VINF_SUCCESS)
2975 return rcStrict;
2976 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2977 }
2978
2979 /* Commit CS. */
2980 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2981 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2982 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2983 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2984 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2985 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2986 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2987 }
2988
2989 /* Make sure the CPU mode is correct. */
2990 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
2991 if (fExecNew != pVCpu->iem.s.fExec)
2992 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
2993 pVCpu->iem.s.fExec = fExecNew;
2994
2995 /** @todo Debug trap. */
2996 if (fIsNewTSS386 && fNewDebugTrap)
2997 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2998
2999 /*
3000 * Construct the error code masks based on what caused this task switch.
3001 * See Intel Instruction reference for INT.
3002 */
3003 uint16_t uExt;
3004 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
3005 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3006 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
3007 uExt = 1;
3008 else
3009 uExt = 0;
3010
3011 /*
3012 * Push any error code on to the new stack.
3013 */
3014 if (fFlags & IEM_XCPT_FLAGS_ERR)
3015 {
3016 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
3017 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3018 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
3019
3020 /* Check that there is sufficient space on the stack. */
3021 /** @todo Factor out segment limit checking for normal/expand down segments
3022 * into a separate function. */
3023 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3024 {
3025 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
3026 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
3027 {
3028 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3029 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
3030 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3031 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3032 }
3033 }
3034 else
3035 {
3036 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
3037 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
3038 {
3039 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
3040 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
3041 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
3042 }
3043 }
3044
3045
3046 if (fIsNewTSS386)
3047 rcStrict = iemMemStackPushU32(pVCpu, uErr);
3048 else
3049 rcStrict = iemMemStackPushU16(pVCpu, uErr);
3050 if (rcStrict != VINF_SUCCESS)
3051 {
3052 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
3053 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
3054 return rcStrict;
3055 }
3056 }
3057
3058 /* Check the new EIP against the new CS limit. */
3059 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
3060 {
3061 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
3062 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
3063 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
3064 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
3065 }
3066
3067 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3068 pVCpu->cpum.GstCtx.ss.Sel));
3069 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3070}
3071
3072
3073/**
3074 * Implements exceptions and interrupts for protected mode.
3075 *
3076 * @returns VBox strict status code.
3077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3078 * @param cbInstr The number of bytes to offset rIP by in the return
3079 * address.
3080 * @param u8Vector The interrupt / exception vector number.
3081 * @param fFlags The flags.
3082 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3083 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3084 */
3085static VBOXSTRICTRC
3086iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
3087 uint8_t cbInstr,
3088 uint8_t u8Vector,
3089 uint32_t fFlags,
3090 uint16_t uErr,
3091 uint64_t uCr2) RT_NOEXCEPT
3092{
3093 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3094
3095 /*
3096 * Read the IDT entry.
3097 */
3098 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
3099 {
3100 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3101 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3102 }
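 /* Note: protected-mode IDT entries are 8 bytes each, so the limit check above
    verifies that all 8 bytes of entry u8Vector (offsets 8*u8Vector thru
    8*u8Vector+7) lie within the inclusive IDTR limit. */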
3103 X86DESC Idte;
3104 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
3105 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
3106 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3107 {
3108 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3109 return rcStrict;
3110 }
3111 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
3112 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3113 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
3114 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
3115
3116 /*
3117 * Check the descriptor type, DPL and such.
3118 * ASSUMES this is done in the same order as described for call-gate calls.
3119 */
3120 if (Idte.Gate.u1DescType)
3121 {
3122 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3123 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3124 }
3125 bool fTaskGate = false;
3126 uint8_t f32BitGate = true;
3127 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3128 switch (Idte.Gate.u4Type)
3129 {
3130 case X86_SEL_TYPE_SYS_UNDEFINED:
3131 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
3132 case X86_SEL_TYPE_SYS_LDT:
3133 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3134 case X86_SEL_TYPE_SYS_286_CALL_GATE:
3135 case X86_SEL_TYPE_SYS_UNDEFINED2:
3136 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
3137 case X86_SEL_TYPE_SYS_UNDEFINED3:
3138 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3139 case X86_SEL_TYPE_SYS_386_CALL_GATE:
3140 case X86_SEL_TYPE_SYS_UNDEFINED4:
3141 {
3142 /** @todo check what actually happens when the type is wrong...
3143 * esp. call gates. */
3144 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3145 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3146 }
3147
3148 case X86_SEL_TYPE_SYS_286_INT_GATE:
3149 f32BitGate = false;
3150 RT_FALL_THRU();
3151 case X86_SEL_TYPE_SYS_386_INT_GATE:
3152 fEflToClear |= X86_EFL_IF;
3153 break;
3154
3155 case X86_SEL_TYPE_SYS_TASK_GATE:
3156 fTaskGate = true;
3157#ifndef IEM_IMPLEMENTS_TASKSWITCH
3158 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
3159#endif
3160 break;
3161
3162 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
3163 f32BitGate = false;
RT_FALL_THRU();
3164 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
3165 break;
3166
3167 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3168 }
3169
3170 /* Check DPL against CPL if applicable. */
3171 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3172 {
3173 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3174 {
3175 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3176 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3177 }
3178 }
3179
3180 /* Is it there? */
3181 if (!Idte.Gate.u1Present)
3182 {
3183 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
3184 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3185 }
3186
3187 /* Is it a task-gate? */
3188 if (fTaskGate)
3189 {
3190 /*
3191 * Construct the error code masks based on what caused this task switch.
3192 * See Intel Instruction reference for INT.
3193 */
3194 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3195 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
3196 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
3197 RTSEL SelTSS = Idte.Gate.u16Sel;
3198
3199 /*
3200 * Fetch the TSS descriptor in the GDT.
3201 */
3202 IEMSELDESC DescTSS;
3203 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
3204 if (rcStrict != VINF_SUCCESS)
3205 {
3206 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3207 VBOXSTRICTRC_VAL(rcStrict)));
3208 return rcStrict;
3209 }
3210
3211 /* The TSS descriptor must be a system segment and be available (not busy). */
3212 if ( DescTSS.Legacy.Gen.u1DescType
3213 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3214 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3215 {
3216 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3217 u8Vector, SelTSS, DescTSS.Legacy.au64));
3218 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3219 }
3220
3221 /* The TSS must be present. */
3222 if (!DescTSS.Legacy.Gen.u1Present)
3223 {
3224 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3225 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3226 }
3227
3228 /* Do the actual task switch. */
3229 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3230 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3231 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3232 }
3233
3234 /* A null CS is bad. */
3235 RTSEL NewCS = Idte.Gate.u16Sel;
3236 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3237 {
3238 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3239 return iemRaiseGeneralProtectionFault0(pVCpu);
3240 }
3241
3242 /* Fetch the descriptor for the new CS. */
3243 IEMSELDESC DescCS;
3244 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3245 if (rcStrict != VINF_SUCCESS)
3246 {
3247 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3248 return rcStrict;
3249 }
3250
3251 /* Must be a code segment. */
3252 if (!DescCS.Legacy.Gen.u1DescType)
3253 {
3254 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3255 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3256 }
3257 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3258 {
3259 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3260 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3261 }
3262
3263 /* Don't allow lowering the privilege level. */
3264 /** @todo Does the lowering of privileges apply to software interrupts
3265 * only? This has a bearing on the more-privileged or
3266 * same-privilege stack behavior further down. A testcase would
3267 * be nice. */
3268 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3269 {
3270 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3271 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3272 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3273 }
3274
3275 /* Make sure the selector is present. */
3276 if (!DescCS.Legacy.Gen.u1Present)
3277 {
3278 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3279 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3280 }
3281
3282#ifdef LOG_ENABLED
3283 /* If software interrupt, try decode it if logging is enabled and such. */
3284 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3285 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
3286 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
3287#endif
3288
3289 /* Check the new EIP against the new CS limit. */
3290 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3291 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3292 ? Idte.Gate.u16OffsetLow
3293 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
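 /* Note: 286 interrupt/trap gates only carry a 16-bit offset, so the high
    offset word is ignored for those gate types. */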
3294 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3295 if (uNewEip > cbLimitCS)
3296 {
3297 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3298 u8Vector, uNewEip, cbLimitCS, NewCS));
3299 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3300 }
3301 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3302
3303 /* Calc the flag image to push. */
3304 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3305 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3306 fEfl &= ~X86_EFL_RF;
3307 else
3308 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3309
3310 /* From V8086 mode only go to CPL 0. */
3311 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3312 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3313 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3314 {
3315 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3316 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3317 }
3318
3319 /*
3320 * If the privilege level changes, we need to get a new stack from the TSS.
3321 * This in turns means validating the new SS and ESP...
3322 */
3323 if (uNewCpl != IEM_GET_CPL(pVCpu))
3324 {
3325 RTSEL NewSS;
3326 uint32_t uNewEsp;
3327 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3328 if (rcStrict != VINF_SUCCESS)
3329 return rcStrict;
3330
3331 IEMSELDESC DescSS;
3332 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3333 if (rcStrict != VINF_SUCCESS)
3334 return rcStrict;
3335 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3336 if (!DescSS.Legacy.Gen.u1DefBig)
3337 {
3338 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3339 uNewEsp = (uint16_t)uNewEsp;
3340 }
3341
3342 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3343
3344 /* Check that there is sufficient space for the stack frame. */
3345 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
3346 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3347 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3348 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
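 /* Note: the sizes above match the pushes below - EIP/IP, CS, EFLAGS, old ESP/SP
    and old SS (5 entries of 2 or 4 bytes depending on the gate size), plus an
    optional error code; when interrupting V8086 code the old ES, DS, FS and GS
    are pushed as well (4 more entries). */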
3349
3350 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3351 {
3352 if ( uNewEsp - 1 > cbLimitSS
3353 || uNewEsp < cbStackFrame)
3354 {
3355 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3356 u8Vector, NewSS, uNewEsp, cbStackFrame));
3357 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3358 }
3359 }
3360 else
3361 {
3362 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3363 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3364 {
3365 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3366 u8Vector, NewSS, uNewEsp, cbStackFrame));
3367 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3368 }
3369 }
3370
3371 /*
3372 * Start making changes.
3373 */
3374
3375 /* Set the new CPL so that stack accesses use it. */
3376 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3377 IEM_SET_CPL(pVCpu, uNewCpl);
3378
3379 /* Create the stack frame. */
3380 RTPTRUNION uStackFrame;
3381 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3382 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3383 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3384 if (rcStrict != VINF_SUCCESS)
3385 return rcStrict;
3386 void * const pvStackFrame = uStackFrame.pv;
3387 if (f32BitGate)
3388 {
3389 if (fFlags & IEM_XCPT_FLAGS_ERR)
3390 *uStackFrame.pu32++ = uErr;
3391 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3392 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3393 uStackFrame.pu32[2] = fEfl;
3394 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3395 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3396 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3397 if (fEfl & X86_EFL_VM)
3398 {
3399 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3400 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3401 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3402 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3403 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3404 }
3405 }
3406 else
3407 {
3408 if (fFlags & IEM_XCPT_FLAGS_ERR)
3409 *uStackFrame.pu16++ = uErr;
3410 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3411 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3412 uStackFrame.pu16[2] = fEfl;
3413 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3414 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3415 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3416 if (fEfl & X86_EFL_VM)
3417 {
3418 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3419 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3420 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3421 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3422 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3423 }
3424 }
3425 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3426 if (rcStrict != VINF_SUCCESS)
3427 return rcStrict;
3428
3429 /* Mark the selectors 'accessed' (hope this is the correct time). */
3430 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3431 * after pushing the stack frame? (Write protect the gdt + stack to
3432 * find out.) */
3433 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3434 {
3435 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3436 if (rcStrict != VINF_SUCCESS)
3437 return rcStrict;
3438 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3439 }
3440
3441 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3442 {
3443 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3444 if (rcStrict != VINF_SUCCESS)
3445 return rcStrict;
3446 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3447 }
3448
3449 /*
3450 * Start committing the register changes (joins with the DPL=CPL branch).
3451 */
3452 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3453 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3454 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3455 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3456 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3457 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3458 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3459 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3460 * SP is loaded).
3461 * Need to check the other combinations too:
3462 * - 16-bit TSS, 32-bit handler
3463 * - 32-bit TSS, 16-bit handler */
3464 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3465 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3466 else
3467 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3468
3469 if (fEfl & X86_EFL_VM)
3470 {
3471 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3472 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3473 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3474 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3475 }
3476 }
3477 /*
3478 * Same privilege, no stack change and smaller stack frame.
3479 */
3480 else
3481 {
3482 uint64_t uNewRsp;
3483 RTPTRUNION uStackFrame;
3484 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
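 /* Note: same-privilege frames only hold EIP/IP, CS and EFLAGS plus an optional
    error code: 6 or 8 bytes for a 16-bit gate, 12 or 16 bytes for a 32-bit gate. */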
3485 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3486 if (rcStrict != VINF_SUCCESS)
3487 return rcStrict;
3488 void * const pvStackFrame = uStackFrame.pv;
3489
3490 if (f32BitGate)
3491 {
3492 if (fFlags & IEM_XCPT_FLAGS_ERR)
3493 *uStackFrame.pu32++ = uErr;
3494 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3495 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3496 uStackFrame.pu32[2] = fEfl;
3497 }
3498 else
3499 {
3500 if (fFlags & IEM_XCPT_FLAGS_ERR)
3501 *uStackFrame.pu16++ = uErr;
3502 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3503 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
3504 uStackFrame.pu16[2] = fEfl;
3505 }
3506 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3507 if (rcStrict != VINF_SUCCESS)
3508 return rcStrict;
3509
3510 /* Mark the CS selector as 'accessed'. */
3511 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3512 {
3513 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3514 if (rcStrict != VINF_SUCCESS)
3515 return rcStrict;
3516 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3517 }
3518
3519 /*
3520 * Start committing the register changes (joins with the other branch).
3521 */
3522 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3523 }
3524
3525 /* ... register committing continues. */
3526 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3527 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3528 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3529 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3530 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3531 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3532
3533 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3534 fEfl &= ~fEflToClear;
3535 IEMMISC_SET_EFL(pVCpu, fEfl);
3536
3537 if (fFlags & IEM_XCPT_FLAGS_CR2)
3538 pVCpu->cpum.GstCtx.cr2 = uCr2;
3539
3540 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3541 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3542
3543 /* Make sure the execution flags are correct. */
3544 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
3545 if (fExecNew != pVCpu->iem.s.fExec)
3546 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
3547 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
3548 pVCpu->iem.s.fExec = fExecNew;
3549 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
3550
3551 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3552}
3553
3554
3555/**
3556 * Implements exceptions and interrupts for long mode.
3557 *
3558 * @returns VBox strict status code.
3559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3560 * @param cbInstr The number of bytes to offset rIP by in the return
3561 * address.
3562 * @param u8Vector The interrupt / exception vector number.
3563 * @param fFlags The flags.
3564 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3565 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3566 */
3567static VBOXSTRICTRC
3568iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3569 uint8_t cbInstr,
3570 uint8_t u8Vector,
3571 uint32_t fFlags,
3572 uint16_t uErr,
3573 uint64_t uCr2) RT_NOEXCEPT
3574{
3575 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3576
3577 /*
3578 * Read the IDT entry.
3579 */
3580 uint16_t offIdt = (uint16_t)u8Vector << 4;
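 /* Note: long-mode IDT gate descriptors are 16 bytes, hence the shift by 4 here
    and the two 8-byte fetches below. */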
3581 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3582 {
3583 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3584 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3585 }
3586 X86DESC64 Idte;
3587#ifdef _MSC_VER /* Shut up silly compiler warning. */
3588 Idte.au64[0] = 0;
3589 Idte.au64[1] = 0;
3590#endif
3591 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3592 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3593 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3594 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3595 {
3596 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3597 return rcStrict;
3598 }
3599 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3600 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3601 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3602
3603 /*
3604 * Check the descriptor type, DPL and such.
3605 * ASSUMES this is done in the same order as described for call-gate calls.
3606 */
3607 if (Idte.Gate.u1DescType)
3608 {
3609 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3610 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3611 }
3612 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3613 switch (Idte.Gate.u4Type)
3614 {
3615 case AMD64_SEL_TYPE_SYS_INT_GATE:
3616 fEflToClear |= X86_EFL_IF;
3617 break;
3618 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3619 break;
3620
3621 default:
3622 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3623 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3624 }
3625
3626 /* Check DPL against CPL if applicable. */
3627 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3628 {
3629 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
3630 {
3631 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
3632 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3633 }
3634 }
3635
3636 /* Is it there? */
3637 if (!Idte.Gate.u1Present)
3638 {
3639 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3640 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3641 }
3642
3643 /* A null CS is bad. */
3644 RTSEL NewCS = Idte.Gate.u16Sel;
3645 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3646 {
3647 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3648 return iemRaiseGeneralProtectionFault0(pVCpu);
3649 }
3650
3651 /* Fetch the descriptor for the new CS. */
3652 IEMSELDESC DescCS;
3653 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3654 if (rcStrict != VINF_SUCCESS)
3655 {
3656 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3657 return rcStrict;
3658 }
3659
3660 /* Must be a 64-bit code segment. */
3661 if (!DescCS.Long.Gen.u1DescType)
3662 {
3663 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3664 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3665 }
3666 if ( !DescCS.Long.Gen.u1Long
3667 || DescCS.Long.Gen.u1DefBig
3668 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3669 {
3670 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3671 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3672 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3673 }
3674
3675 /* Don't allow lowering the privilege level. For non-conforming CS
3676 selectors, the CS.DPL sets the privilege level the trap/interrupt
3677 handler runs at. For conforming CS selectors, the CPL remains
3678 unchanged, but the CS.DPL must be <= CPL. */
3679 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3680 * when CPU in Ring-0. Result \#GP? */
3681 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
3682 {
3683 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3684 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
3685 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3686 }
3687
3688
3689 /* Make sure the selector is present. */
3690 if (!DescCS.Legacy.Gen.u1Present)
3691 {
3692 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3693 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3694 }
3695
3696 /* Check that the new RIP is canonical. */
3697 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3698 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3699 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3700 if (!IEM_IS_CANONICAL(uNewRip))
3701 {
3702 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3703 return iemRaiseGeneralProtectionFault0(pVCpu);
3704 }
3705
3706 /*
3707 * If the privilege level changes or if the IST isn't zero, we need to get
3708 * a new stack from the TSS.
3709 */
3710 uint64_t uNewRsp;
3711 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3712 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
3713 if ( uNewCpl != IEM_GET_CPL(pVCpu)
3714 || Idte.Gate.u3IST != 0)
3715 {
3716 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3717 if (rcStrict != VINF_SUCCESS)
3718 return rcStrict;
3719 }
3720 else
3721 uNewRsp = pVCpu->cpum.GstCtx.rsp;
3722 uNewRsp &= ~(uint64_t)0xf;
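 /* Note: in 64-bit mode the CPU aligns the new RSP down to a 16-byte boundary
    before pushing the interrupt stack frame, which is what the masking above does. */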
3723
3724 /*
3725 * Calc the flag image to push.
3726 */
3727 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3728 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3729 fEfl &= ~X86_EFL_RF;
3730 else
3731 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3732
3733 /*
3734 * Start making changes.
3735 */
3736 /* Set the new CPL so that stack accesses use it. */
3737 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
3738 IEM_SET_CPL(pVCpu, uNewCpl);
3739/** @todo Setting CPL this early seems wrong as it would affect any errors we
3740 * raise accessing the stack and (?) GDT/LDT... */
3741
3742 /* Create the stack frame. */
3743 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
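 /* Note: the 64-bit frame always holds SS, RSP, RFLAGS, CS and RIP (5 qwords,
    40 bytes), plus 8 bytes when an error code is pushed. */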
3744 RTPTRUNION uStackFrame;
3745 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3746 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3747 if (rcStrict != VINF_SUCCESS)
3748 return rcStrict;
3749 void * const pvStackFrame = uStackFrame.pv;
3750
3751 if (fFlags & IEM_XCPT_FLAGS_ERR)
3752 *uStackFrame.pu64++ = uErr;
3753 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3754 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3755 uStackFrame.pu64[2] = fEfl;
3756 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3757 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3758 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3759 if (rcStrict != VINF_SUCCESS)
3760 return rcStrict;
3761
3762 /* Mark the CS selector 'accessed' (hope this is the correct time). */
3763 /** @todo testcase: exactly _when_ are the accessed bits set - before or
3764 * after pushing the stack frame? (Write protect the gdt + stack to
3765 * find out.) */
3766 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3767 {
3768 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3769 if (rcStrict != VINF_SUCCESS)
3770 return rcStrict;
3771 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3772 }
3773
3774 /*
3775 * Start committing the register changes.
3776 */
3777 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
3778 * hidden registers when interrupting 32-bit or 16-bit code! */
3779 if (uNewCpl != uOldCpl)
3780 {
3781 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3782 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3783 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3784 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3785 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3786 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3787 }
3788 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3789 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3790 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3791 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3792 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3793 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3794 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3795 pVCpu->cpum.GstCtx.rip = uNewRip;
3796
3797 fEfl &= ~fEflToClear;
3798 IEMMISC_SET_EFL(pVCpu, fEfl);
3799
3800 if (fFlags & IEM_XCPT_FLAGS_CR2)
3801 pVCpu->cpum.GstCtx.cr2 = uCr2;
3802
3803 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3804 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3805
3806 iemRecalcExecModeAndCplFlags(pVCpu);
3807
3808 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3809}
3810
3811
3812/**
3813 * Implements exceptions and interrupts.
3814 *
3815 * All exceptions and interrupts go through this function!
3816 *
3817 * @returns VBox strict status code.
3818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3819 * @param cbInstr The number of bytes to offset rIP by in the return
3820 * address.
3821 * @param u8Vector The interrupt / exception vector number.
3822 * @param fFlags The flags.
3823 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3824 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3825 */
3826VBOXSTRICTRC
3827iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3828 uint8_t cbInstr,
3829 uint8_t u8Vector,
3830 uint32_t fFlags,
3831 uint16_t uErr,
3832 uint64_t uCr2) RT_NOEXCEPT
3833{
3834 /*
3835 * Get all the state that we might need here.
3836 */
3837 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3838 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3839
3840#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3841 /*
3842 * Flush prefetch buffer
3843 */
3844 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3845#endif
3846
3847 /*
3848 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3849 */
3850 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3851 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3852 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3853 | IEM_XCPT_FLAGS_BP_INSTR
3854 | IEM_XCPT_FLAGS_ICEBP_INSTR
3855 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3856 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3857 {
3858 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3859 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3860 u8Vector = X86_XCPT_GP;
3861 uErr = 0;
3862 }
3863#ifdef DBGFTRACE_ENABLED
3864 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3865 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3866 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3867#endif
3868
3869 /*
3870 * Evaluate whether NMI blocking should be in effect.
3871 * Normally, NMI blocking is in effect whenever we inject an NMI.
3872 */
3873 bool fBlockNmi = u8Vector == X86_XCPT_NMI
3874 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
3875
3876#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3877 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3878 {
3879 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3880 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3881 return rcStrict0;
3882
3883 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3884 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3885 {
3886 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3887 fBlockNmi = false;
3888 }
3889 }
3890#endif
3891
3892#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3893 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3894 {
3895 /*
3896 * If the event is being injected as part of VMRUN, it isn't subject to event
3897 * intercepts in the nested-guest. However, secondary exceptions that occur
3898 * during injection of any event -are- subject to exception intercepts.
3899 *
3900 * See AMD spec. 15.20 "Event Injection".
3901 */
3902 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3903 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3904 else
3905 {
3906 /*
3907 * Check and handle if the event being raised is intercepted.
3908 */
3909 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3910 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3911 return rcStrict0;
3912 }
3913 }
3914#endif
3915
3916 /*
3917 * Set NMI blocking if necessary.
3918 */
3919 if (fBlockNmi)
3920 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3921
3922 /*
3923 * Do recursion accounting.
3924 */
3925 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3926 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3927 if (pVCpu->iem.s.cXcptRecursions == 0)
3928 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3929 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3930 else
3931 {
3932 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3933 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3934 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3935
3936 if (pVCpu->iem.s.cXcptRecursions >= 4)
3937 {
3938#ifdef DEBUG_bird
3939 AssertFailed();
3940#endif
3941 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3942 }
3943
3944 /*
3945 * Evaluate the sequence of recurring events.
3946 */
3947 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3948 NULL /* pXcptRaiseInfo */);
3949 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3950 { /* likely */ }
3951 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3952 {
3953 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3954 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3955 u8Vector = X86_XCPT_DF;
3956 uErr = 0;
3957#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3958 /* VMX nested-guest #DF intercept needs to be checked here. */
3959 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3960 {
3961 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3962 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3963 return rcStrict0;
3964 }
3965#endif
3966 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3967 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3968 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3969 }
3970 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3971 {
3972 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3973 return iemInitiateCpuShutdown(pVCpu);
3974 }
3975 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3976 {
3977 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3978 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3979 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3980 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3981 return VERR_EM_GUEST_CPU_HANG;
3982 }
3983 else
3984 {
3985 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3986 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3987 return VERR_IEM_IPE_9;
3988 }
3989
3990 /*
3991 * The 'EXT' bit is set when an exception occurs during delivery of an external
3992 * event (such as an interrupt or an earlier exception)[1]. The privileged software
3993 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by the software
3994 * interrupt instructions INT n, INT3 and INTO, the 'EXT' bit will not be set[3].
3995 *
3996 * [1] - Intel spec. 6.13 "Error Code"
3997 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3998 * [3] - Intel Instruction reference for INT n.
3999 */
4000 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
4001 && (fFlags & IEM_XCPT_FLAGS_ERR)
4002 && u8Vector != X86_XCPT_PF
4003 && u8Vector != X86_XCPT_DF)
4004 {
4005 uErr |= X86_TRAP_ERR_EXTERNAL;
4006 }
4007 }
4008
4009 pVCpu->iem.s.cXcptRecursions++;
4010 pVCpu->iem.s.uCurXcpt = u8Vector;
4011 pVCpu->iem.s.fCurXcpt = fFlags;
4012 pVCpu->iem.s.uCurXcptErr = uErr;
4013 pVCpu->iem.s.uCurXcptCr2 = uCr2;
4014
4015 /*
4016 * Extensive logging.
4017 */
4018#if defined(LOG_ENABLED) && defined(IN_RING3)
4019 if (LogIs3Enabled())
4020 {
4021 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
4022 PVM pVM = pVCpu->CTX_SUFF(pVM);
4023 char szRegs[4096];
4024 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4025 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4026 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4027 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4028 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4029 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4030 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4031 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4032 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4033 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4034 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4035 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4036 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4037 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4038 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4039 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4040 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4041 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4042 " efer=%016VR{efer}\n"
4043 " pat=%016VR{pat}\n"
4044 " sf_mask=%016VR{sf_mask}\n"
4045 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4046 " lstar=%016VR{lstar}\n"
4047 " star=%016VR{star} cstar=%016VR{cstar}\n"
4048 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4049 );
4050
4051 char szInstr[256];
4052 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4053 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4054 szInstr, sizeof(szInstr), NULL);
4055 Log3(("%s%s\n", szRegs, szInstr));
4056 }
4057#endif /* LOG_ENABLED */
4058
4059 /*
4060 * Stats.
4061 */
4062 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
4063 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
4064 else if (u8Vector <= X86_XCPT_LAST)
4065 {
4066 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
4067 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
4068 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
4069 }
4070
4071 /*
4072 * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
4073 * to ensure that a stale TLB or paging cache entry will only cause one
4074 * spurious #PF.
4075 */
4076 if ( u8Vector == X86_XCPT_PF
4077 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
4078 IEMTlbInvalidatePage(pVCpu, uCr2);
4079
4080 /*
4081 * Call the mode specific worker function.
4082 */
4083 VBOXSTRICTRC rcStrict;
4084 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4085 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4086 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
4087 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4088 else
4089 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4090
4091 /* Flush the prefetch buffer. */
4092 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
4093
4094 /*
4095 * Unwind.
4096 */
4097 pVCpu->iem.s.cXcptRecursions--;
4098 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
4099 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
4100 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
4101 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
4102 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
4103 return rcStrict;
4104}
4105
4106#ifdef IEM_WITH_SETJMP
4107/**
4108 * See iemRaiseXcptOrInt. Will not return.
4109 */
4110DECL_NO_RETURN(void)
4111iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
4112 uint8_t cbInstr,
4113 uint8_t u8Vector,
4114 uint32_t fFlags,
4115 uint16_t uErr,
4116 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
4117{
4118 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
4119 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
4120}
4121#endif
4122
4123
4124/** \#DE - 00. */
4125VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
4126{
4127 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4128}
4129
4130
4131/** \#DB - 01.
4132 * @note This automatically clears DR7.GD. */
4133VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
4134{
4135 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
4136 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4137 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
4138}
4139
4140
4141/** \#BR - 05. */
4142VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
4143{
4144 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4145}
4146
4147
4148/** \#UD - 06. */
4149VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
4150{
4151 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4152}
4153
4154
4155/** \#NM - 07. */
4156VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
4157{
4158 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4159}
4160
4161
4162/** \#TS(err) - 0a. */
4163VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4164{
4165 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4166}
4167
4168
4169/** \#TS(tr) - 0a. */
4170VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
4171{
4172 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4173 pVCpu->cpum.GstCtx.tr.Sel, 0);
4174}
4175
4176
4177/** \#TS(0) - 0a. */
4178VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4179{
4180 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4181 0, 0);
4182}
4183
4184
4185/** \#TS(err) - 0a. */
4186VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4187{
4188 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4189 uSel & X86_SEL_MASK_OFF_RPL, 0);
4190}
4191
4192
4193/** \#NP(err) - 0b. */
4194VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4195{
4196 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4197}
4198
4199
4200/** \#NP(sel) - 0b. */
4201VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4202{
4203 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4204 uSel & ~X86_SEL_RPL, 0);
4205}
4206
4207
4208/** \#SS(seg) - 0c. */
4209VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
4210{
4211 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4212 uSel & ~X86_SEL_RPL, 0);
4213}
4214
4215
4216/** \#SS(err) - 0c. */
4217VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4218{
4219 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4220}
4221
4222
4223/** \#GP(n) - 0d. */
4224VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4225{
4226 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4227}
4228
4229
4230/** \#GP(0) - 0d. */
4231VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4232{
4233 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4234}
4235
4236#ifdef IEM_WITH_SETJMP
4237/** \#GP(0) - 0d. */
4238DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4239{
4240 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4241}
4242#endif
4243
4244
4245/** \#GP(sel) - 0d. */
4246VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4247{
4248 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4249 Sel & ~X86_SEL_RPL, 0);
4250}
4251
4252
4253/** \#GP(0) - 0d. */
4254VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4255{
4256 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4257}
4258
4259
4260/** \#GP(sel) - 0d. */
4261VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4262{
4263 NOREF(iSegReg); NOREF(fAccess);
4264 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4265 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4266}
4267
4268#ifdef IEM_WITH_SETJMP
4269/** \#GP(sel) - 0d, longjmp. */
4270DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4271{
4272 NOREF(iSegReg); NOREF(fAccess);
4273 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4274 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4275}
4276#endif
4277
4278/** \#GP(sel) - 0d. */
4279VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4280{
4281 NOREF(Sel);
4282 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4283}
4284
4285#ifdef IEM_WITH_SETJMP
4286/** \#GP(sel) - 0d, longjmp. */
4287DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
4288{
4289 NOREF(Sel);
4290 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4291}
4292#endif
4293
4294
4295/** \#GP(sel) - 0d. */
4296VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4297{
4298 NOREF(iSegReg); NOREF(fAccess);
4299 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4300}
4301
4302#ifdef IEM_WITH_SETJMP
4303/** \#GP(sel) - 0d, longjmp. */
4304DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
4305{
4306 NOREF(iSegReg); NOREF(fAccess);
4307 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4308}
4309#endif
4310
4311
4312/** \#PF(n) - 0e. */
4313VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
4314{
4315 uint16_t uErr;
4316 switch (rc)
4317 {
4318 case VERR_PAGE_NOT_PRESENT:
4319 case VERR_PAGE_TABLE_NOT_PRESENT:
4320 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4321 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4322 uErr = 0;
4323 break;
4324
4325 default:
4326 AssertMsgFailed(("%Rrc\n", rc));
4327 RT_FALL_THRU();
4328 case VERR_ACCESS_DENIED:
4329 uErr = X86_TRAP_PF_P;
4330 break;
4331
4332 /** @todo reserved */
4333 }
4334
4335 if (IEM_GET_CPL(pVCpu) == 3)
4336 uErr |= X86_TRAP_PF_US;
4337
4338 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4339 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4340 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4341 uErr |= X86_TRAP_PF_ID;
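 /* Note: the instruction-fetch (I/D) bit in the #PF error code is only defined for
    code fetches and, per the check above, only when NX is available (PAE paging
    with EFER.NXE set). */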
4342
4343#if 0 /* This is so much non-sense, really. Why was it done like that? */
4344 /* Note! RW access callers reporting a WRITE protection fault, will clear
4345 the READ flag before calling. So, read-modify-write accesses (RW)
4346 can safely be reported as READ faults. */
4347 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4348 uErr |= X86_TRAP_PF_RW;
4349#else
4350 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4351 {
4352 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4353 /// (regardless of outcome of the comparison in the latter case).
4354 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4355 uErr |= X86_TRAP_PF_RW;
4356 }
4357#endif
4358
4359 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
4360 of the memory operand rather than at the start of it. (Not sure what
4361 happens if it crosses a page boundary.) The current heuristic for
4362 this is to report the #PF for the last byte if the access is more than
4363 64 bytes. This is probably not correct, but we can work that out later;
4364 the main objective now is to get FXSAVE to work like on real hardware and
4365 make bs3-cpu-basic2 work. */
4366 if (cbAccess <= 64)
4367 { /* likely*/ }
4368 else
4369 GCPtrWhere += cbAccess - 1;
4370
4371 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4372 uErr, GCPtrWhere);
4373}
4374
4375#ifdef IEM_WITH_SETJMP
4376/** \#PF(n) - 0e, longjmp. */
4377DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
4378 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
4379{
4380 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
4381}
4382#endif
4383
4384
4385/** \#MF(0) - 10. */
4386VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4387{
4388 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
4389 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4390
4391 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
4392 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
4393 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
4394}
4395
4396
4397/** \#AC(0) - 11. */
4398VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4399{
4400 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4401}
4402
4403#ifdef IEM_WITH_SETJMP
4404/** \#AC(0) - 11, longjmp. */
4405DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4406{
4407 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4408}
4409#endif
4410
4411
4412/** \#XF(0)/\#XM(0) - 19. */
4413VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4414{
4415 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4416}
4417
4418
4419/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4420IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4421{
4422 NOREF(cbInstr);
4423 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4424}
4425
4426
4427/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4428IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4429{
4430 NOREF(cbInstr);
4431 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4432}
4433
4434
4435/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4436IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4437{
4438 NOREF(cbInstr);
4439 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4440}
4441
4442
4443/** @} */
4444
4445/** @name Common opcode decoders.
4446 * @{
4447 */
4448//#include <iprt/mem.h>
4449
4450/**
4451 * Used to add extra details about a stub case.
4452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4453 */
4454void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4455{
4456#if defined(LOG_ENABLED) && defined(IN_RING3)
4457 PVM pVM = pVCpu->CTX_SUFF(pVM);
4458 char szRegs[4096];
4459 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4460 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4461 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4462 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4463 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4464 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4465 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4466 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4467 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4468 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4469 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4470 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4471 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4472 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4473 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4474 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4475 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4476 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4477 " efer=%016VR{efer}\n"
4478 " pat=%016VR{pat}\n"
4479 " sf_mask=%016VR{sf_mask}\n"
4480 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4481 " lstar=%016VR{lstar}\n"
4482 " star=%016VR{star} cstar=%016VR{cstar}\n"
4483 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4484 );
4485
4486 char szInstr[256];
4487 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4488 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4489 szInstr, sizeof(szInstr), NULL);
4490
4491 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4492#else
4493    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4494#endif
4495}
4496
4497/** @} */
4498
4499
4500
4501/** @name Register Access.
4502 * @{
4503 */
4504
4505/**
4506 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4507 *
4508 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4509 * segment limit.
4510 *
4511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4512 * @param cbInstr Instruction size.
4513 * @param offNextInstr The offset of the next instruction.
4514 * @param enmEffOpSize Effective operand size.
4515 */
4516VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
4517 IEMMODE enmEffOpSize) RT_NOEXCEPT
4518{
4519 switch (enmEffOpSize)
4520 {
4521 case IEMMODE_16BIT:
4522 {
4523 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
4524 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4525 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
4526 pVCpu->cpum.GstCtx.rip = uNewIp;
4527 else
4528 return iemRaiseGeneralProtectionFault0(pVCpu);
4529 break;
4530 }
4531
4532 case IEMMODE_32BIT:
4533 {
4534 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4535 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4536
4537 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
4538 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4539 pVCpu->cpum.GstCtx.rip = uNewEip;
4540 else
4541 return iemRaiseGeneralProtectionFault0(pVCpu);
4542 break;
4543 }
4544
4545 case IEMMODE_64BIT:
4546 {
4547 Assert(IEM_IS_64BIT_CODE(pVCpu));
4548
4549 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4550 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4551 pVCpu->cpum.GstCtx.rip = uNewRip;
4552 else
4553 return iemRaiseGeneralProtectionFault0(pVCpu);
4554 break;
4555 }
4556
4557 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4558 }
4559
4560#ifndef IEM_WITH_CODE_TLB
4561 /* Flush the prefetch buffer. */
4562 pVCpu->iem.s.cbOpcode = cbInstr;
4563#endif
4564
4565 /*
4566 * Clear RF and finish the instruction (maybe raise #DB).
4567 */
4568 return iemRegFinishClearingRF(pVCpu);
4569}
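
/*
 * Rough example of the 16-bit case in the function above: the target is
 * computed in uint16_t arithmetic, so it wraps at 64K before any check is
 * made.  With IP=0xFFFD, cbInstr=2 and offNextInstr=+0x10 the new IP is
 * 0x000F, and it is that wrapped value which is compared against CS.limit
 * (no limit check is done for 64-bit code).
 */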
4570
4571
4572/**
4573 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4574 *
4575 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4576 * segment limit.
4577 *
4578 * @returns Strict VBox status code.
4579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4580 * @param cbInstr Instruction size.
4581 * @param offNextInstr The offset of the next instruction.
4582 */
4583VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
4584{
4585 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4586
4587 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
4588 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4589 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
4590 pVCpu->cpum.GstCtx.rip = uNewIp;
4591 else
4592 return iemRaiseGeneralProtectionFault0(pVCpu);
4593
4594#ifndef IEM_WITH_CODE_TLB
4595 /* Flush the prefetch buffer. */
4596 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4597#endif
4598
4599 /*
4600 * Clear RF and finish the instruction (maybe raise #DB).
4601 */
4602 return iemRegFinishClearingRF(pVCpu);
4603}
4604
4605
4606/**
4607 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4608 *
4609 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4610 * segment limit.
4611 *
4612 * @returns Strict VBox status code.
4613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4614 * @param cbInstr Instruction size.
4615 * @param offNextInstr The offset of the next instruction.
4616 * @param enmEffOpSize Effective operand size.
4617 */
4618VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
4619 IEMMODE enmEffOpSize) RT_NOEXCEPT
4620{
4621 if (enmEffOpSize == IEMMODE_32BIT)
4622 {
4623 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
4624
4625 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
4626 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4627 pVCpu->cpum.GstCtx.rip = uNewEip;
4628 else
4629 return iemRaiseGeneralProtectionFault0(pVCpu);
4630 }
4631 else
4632 {
4633 Assert(enmEffOpSize == IEMMODE_64BIT);
4634
4635 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
4636 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4637 pVCpu->cpum.GstCtx.rip = uNewRip;
4638 else
4639 return iemRaiseGeneralProtectionFault0(pVCpu);
4640 }
4641
4642#ifndef IEM_WITH_CODE_TLB
4643 /* Flush the prefetch buffer. */
4644 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4645#endif
4646
4647 /*
4648 * Clear RF and finish the instruction (maybe raise #DB).
4649 */
4650 return iemRegFinishClearingRF(pVCpu);
4651}
4652
4653
4654/**
4655 * Performs a near jump to the specified address.
4656 *
4657 * May raise a \#GP(0) if the new IP is outside the code segment limit.
4658 *
4659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4660 * @param uNewIp The new IP value.
4661 */
4662VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
4663{
4664 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
4665 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
4666 pVCpu->cpum.GstCtx.rip = uNewIp;
4667 else
4668 return iemRaiseGeneralProtectionFault0(pVCpu);
4669 /** @todo Test 16-bit jump in 64-bit mode. */
4670
4671#ifndef IEM_WITH_CODE_TLB
4672 /* Flush the prefetch buffer. */
4673 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4674#endif
4675
4676 /*
4677 * Clear RF and finish the instruction (maybe raise #DB).
4678 */
4679 return iemRegFinishClearingRF(pVCpu);
4680}
4681
4682
4683/**
4684 * Performs a near jump to the specified address.
4685 *
4686 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
4687 *
4688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4689 * @param uNewEip The new EIP value.
4690 */
4691VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
4692{
4693 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4694 Assert(!IEM_IS_64BIT_CODE(pVCpu));
4695
4696 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
4697 pVCpu->cpum.GstCtx.rip = uNewEip;
4698 else
4699 return iemRaiseGeneralProtectionFault0(pVCpu);
4700
4701#ifndef IEM_WITH_CODE_TLB
4702 /* Flush the prefetch buffer. */
4703 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4704#endif
4705
4706 /*
4707 * Clear RF and finish the instruction (maybe raise #DB).
4708 */
4709 return iemRegFinishClearingRF(pVCpu);
4710}
4711
4712
4713/**
4714 * Performs a near jump to the specified address.
4715 *
4716 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4717 * segment limit.
4718 *
4719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4720 * @param uNewRip The new RIP value.
4721 */
4722VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4723{
4724 Assert(IEM_IS_64BIT_CODE(pVCpu));
4725
4726 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
4727 pVCpu->cpum.GstCtx.rip = uNewRip;
4728 else
4729 return iemRaiseGeneralProtectionFault0(pVCpu);
4730
4731#ifndef IEM_WITH_CODE_TLB
4732 /* Flush the prefetch buffer. */
4733 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4734#endif
4735
4736 /*
4737 * Clear RF and finish the instruction (maybe raise #DB).
4738 */
4739 return iemRegFinishClearingRF(pVCpu);
4740}
4741
4742/** @} */
4743
4744
4745/** @name FPU access and helpers.
4746 *
4747 * @{
4748 */
4749
4750/**
4751 * Updates the x87.DS and FPUDP registers.
4752 *
4753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4754 * @param pFpuCtx The FPU context.
4755 * @param iEffSeg The effective segment register.
4756 * @param GCPtrEff The effective address relative to @a iEffSeg.
4757 */
4758DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4759{
4760 RTSEL sel;
4761 switch (iEffSeg)
4762 {
4763 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4764 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4765 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4766 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4767 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4768 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4769 default:
4770 AssertMsgFailed(("%d\n", iEffSeg));
4771 sel = pVCpu->cpum.GstCtx.ds.Sel;
4772 }
4773    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4774 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4775 {
4776 pFpuCtx->DS = 0;
4777 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
4778 }
4779 else if (!IEM_IS_LONG_MODE(pVCpu)) /** @todo this is weird. explain. */
4780 {
4781 pFpuCtx->DS = sel;
4782 pFpuCtx->FPUDP = GCPtrEff;
4783 }
4784 else
4785 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4786}
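
/*
 * Informal example of what iemFpuUpdateDP stores: in real or V86 mode the
 * selector field is zeroed and FPUDP holds the linear address, e.g. DS=0x1234
 * with GCPtrEff=0x0010 gives FPUDP = 0x12340 + 0x10 = 0x12350.  In protected
 * mode (outside long mode) the selector and the 32-bit offset are kept
 * separately, while in long mode a 64-bit offset is stored.
 */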
4787
4788
4789/**
4790 * Rotates the stack registers in the push direction.
4791 *
4792 * @param pFpuCtx The FPU context.
4793 * @remarks This is a complete waste of time, but fxsave stores the registers in
4794 * stack order.
4795 */
4796DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4797{
4798 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4799 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4800 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4801 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4802 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4803 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4804 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4805 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4806 pFpuCtx->aRegs[0].r80 = r80Tmp;
4807}
4808
4809
4810/**
4811 * Rotates the stack registers in the pop direction.
4812 *
4813 * @param pFpuCtx The FPU context.
4814 * @remarks This is a complete waste of time, but fxsave stores the registers in
4815 * stack order.
4816 */
4817DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4818{
4819 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4820 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4821 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4822 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4823 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4824 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4825 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4826 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4827 pFpuCtx->aRegs[7].r80 = r80Tmp;
4828}
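
/*
 * Note on the two rotation helpers above: aRegs[] is kept in ST-relative
 * order (aRegs[0] is always ST(0)), matching the layout FXSAVE uses, so every
 * change of TOP has to be accompanied by a rotation.  For example, the push
 * helpers store the new value in aRegs[7] and then call iemFpuRotateStackPush,
 * which moves it into aRegs[0], where the new ST(0) lives.
 */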
4829
4830
4831/**
4832 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4833 * exception prevents it.
4834 *
4835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4836 * @param pResult The FPU operation result to push.
4837 * @param pFpuCtx The FPU context.
4838 */
4839static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4840{
4841 /* Update FSW and bail if there are pending exceptions afterwards. */
4842 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4843 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4844 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4845 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4846 {
4847        if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4848 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4849 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4850 pFpuCtx->FSW = fFsw;
4851 return;
4852 }
4853
4854 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4855 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4856 {
4857 /* All is fine, push the actual value. */
4858 pFpuCtx->FTW |= RT_BIT(iNewTop);
4859 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4860 }
4861 else if (pFpuCtx->FCW & X86_FCW_IM)
4862 {
4863 /* Masked stack overflow, push QNaN. */
4864 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4865 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4866 }
4867 else
4868 {
4869 /* Raise stack overflow, don't push anything. */
4870 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4871 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4872 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4873 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4874 return;
4875 }
4876
4877 fFsw &= ~X86_FSW_TOP_MASK;
4878 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4879 pFpuCtx->FSW = fFsw;
4880
4881 iemFpuRotateStackPush(pFpuCtx);
4882 RT_NOREF(pVCpu);
4883}
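
/*
 * Rough walk-through of the push helper above: the merged FSW is checked for
 * an unmasked IE/ZE/DE first and the push is abandoned if one is pending.
 * Otherwise the value (or a QNaN on a masked stack overflow, with IE, SF and
 * C1 set) is written to aRegs[7], TOP is decremented and the stack rotated so
 * the new value becomes ST(0); an unmasked stack overflow just records IE,
 * SF, C1, ES and B without pushing anything.
 */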
4884
4885
4886/**
4887 * Stores a result in a FPU register and updates the FSW and FTW.
4888 *
4889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4890 * @param pFpuCtx The FPU context.
4891 * @param pResult The result to store.
4892 * @param iStReg Which FPU register to store it in.
4893 */
4894static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4895{
4896 Assert(iStReg < 8);
4897 uint16_t fNewFsw = pFpuCtx->FSW;
4898 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4899 fNewFsw &= ~X86_FSW_C_MASK;
4900 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4901 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4902 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4903 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4904 pFpuCtx->FSW = fNewFsw;
4905 pFpuCtx->FTW |= RT_BIT(iReg);
4906 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4907 RT_NOREF(pVCpu);
4908}
4909
4910
4911/**
4912 * Only updates the FPU status word (FSW) with the result of the current
4913 * instruction.
4914 *
4915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4916 * @param pFpuCtx The FPU context.
4917 * @param u16FSW The FSW output of the current instruction.
4918 */
4919static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4920{
4921 uint16_t fNewFsw = pFpuCtx->FSW;
4922 fNewFsw &= ~X86_FSW_C_MASK;
4923 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4924 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4925        Log11(("iemFpuUpdateFSWOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4926 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4927 pFpuCtx->FSW = fNewFsw;
4928 RT_NOREF(pVCpu);
4929}
4930
4931
4932/**
4933 * Pops one item off the FPU stack if no pending exception prevents it.
4934 *
4935 * @param pFpuCtx The FPU context.
4936 */
4937static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4938{
4939 /* Check pending exceptions. */
4940 uint16_t uFSW = pFpuCtx->FSW;
4941 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4942 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4943 return;
4944
4945    /* TOP++ (popping increments TOP). */
4946 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4947 uFSW &= ~X86_FSW_TOP_MASK;
4948 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4949 pFpuCtx->FSW = uFSW;
4950
4951 /* Mark the previous ST0 as empty. */
4952 iOldTop >>= X86_FSW_TOP_SHIFT;
4953 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4954
4955 /* Rotate the registers. */
4956 iemFpuRotateStackPop(pFpuCtx);
4957}
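
/*
 * The TOP arithmetic above in modulo-8 terms: popping adds 9 to the 3-bit
 * TOP field, which is the same as TOP = (TOP + 1) & 7, while the push paths
 * use (TOP + 7) & 7, i.e. TOP = TOP - 1 modulo 8.  So with TOP=0 a pop
 * leaves TOP=1 and a push leaves TOP=7, matching the x87 convention that the
 * register stack grows towards lower TOP values.
 */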
4958
4959
4960/**
4961 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4962 *
4963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4964 * @param pResult The FPU operation result to push.
4965 * @param uFpuOpcode The FPU opcode value.
4966 */
4967void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
4968{
4969 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4970 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4971 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4972}
4973
4974
4975/**
4976 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4977 * and sets FPUDP and FPUDS.
4978 *
4979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4980 * @param pResult The FPU operation result to push.
4981 * @param iEffSeg The effective segment register.
4982 * @param GCPtrEff The effective address relative to @a iEffSeg.
4983 * @param uFpuOpcode The FPU opcode value.
4984 */
4985void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff,
4986 uint16_t uFpuOpcode) RT_NOEXCEPT
4987{
4988 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4989 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4990 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
4991 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4992}
4993
4994
4995/**
4996 * Replace ST0 with the first value and push the second onto the FPU stack,
4997 * unless a pending exception prevents it.
4998 *
4999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5000 * @param pResult The FPU operation result to store and push.
5001 * @param uFpuOpcode The FPU opcode value.
5002 */
5003void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT
5004{
5005 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5006 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5007
5008 /* Update FSW and bail if there are pending exceptions afterwards. */
5009 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
5010 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
5011 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
5012 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
5013 {
5014 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
5015 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
5016 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
5017 pFpuCtx->FSW = fFsw;
5018 return;
5019 }
5020
5021 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
5022 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
5023 {
5024 /* All is fine, push the actual value. */
5025 pFpuCtx->FTW |= RT_BIT(iNewTop);
5026 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
5027 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
5028 }
5029 else if (pFpuCtx->FCW & X86_FCW_IM)
5030 {
5031 /* Masked stack overflow, push QNaN. */
5032 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
5033 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5034 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5035 }
5036 else
5037 {
5038 /* Raise stack overflow, don't push anything. */
5039 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
5040 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
5041 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
5042 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5043 return;
5044 }
5045
5046 fFsw &= ~X86_FSW_TOP_MASK;
5047 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
5048 pFpuCtx->FSW = fFsw;
5049
5050 iemFpuRotateStackPush(pFpuCtx);
5051}
5052
5053
5054/**
5055 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5056 * FOP.
5057 *
5058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5059 * @param pResult The result to store.
5060 * @param iStReg Which FPU register to store it in.
5061 * @param uFpuOpcode The FPU opcode value.
5062 */
5063void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5064{
5065 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5066 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5067 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5068}
5069
5070
5071/**
5072 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
5073 * FOP, and then pops the stack.
5074 *
5075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5076 * @param pResult The result to store.
5077 * @param iStReg Which FPU register to store it in.
5078 * @param uFpuOpcode The FPU opcode value.
5079 */
5080void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5081{
5082 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5083 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5084 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5085 iemFpuMaybePopOne(pFpuCtx);
5086}
5087
5088
5089/**
5090 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5091 * FPUDP, and FPUDS.
5092 *
5093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5094 * @param pResult The result to store.
5095 * @param iStReg Which FPU register to store it in.
5096 * @param iEffSeg The effective memory operand selector register.
5097 * @param GCPtrEff The effective memory operand offset.
5098 * @param uFpuOpcode The FPU opcode value.
5099 */
5100void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5101 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5102{
5103 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5104 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5105 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5106 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5107}
5108
5109
5110/**
5111 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
5112 * FPUDP, and FPUDS, and then pops the stack.
5113 *
5114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5115 * @param pResult The result to store.
5116 * @param iStReg Which FPU register to store it in.
5117 * @param iEffSeg The effective memory operand selector register.
5118 * @param GCPtrEff The effective memory operand offset.
5119 * @param uFpuOpcode The FPU opcode value.
5120 */
5121void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
5122 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5123{
5124 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5125 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5126 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5127 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
5128 iemFpuMaybePopOne(pFpuCtx);
5129}
5130
5131
5132/**
5133 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
5134 *
5135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5136 * @param uFpuOpcode The FPU opcode value.
5137 */
5138void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5139{
5140 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5141 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5142}
5143
5144
5145/**
5146 * Updates the FSW, FOP, FPUIP, and FPUCS.
5147 *
5148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5149 * @param u16FSW The FSW from the current instruction.
5150 * @param uFpuOpcode The FPU opcode value.
5151 */
5152void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5153{
5154 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5155 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5156 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5157}
5158
5159
5160/**
5161 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
5162 *
5163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5164 * @param u16FSW The FSW from the current instruction.
5165 * @param uFpuOpcode The FPU opcode value.
5166 */
5167void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5168{
5169 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5170 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5171 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5172 iemFpuMaybePopOne(pFpuCtx);
5173}
5174
5175
5176/**
5177 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
5178 *
5179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5180 * @param u16FSW The FSW from the current instruction.
5181 * @param iEffSeg The effective memory operand selector register.
5182 * @param GCPtrEff The effective memory operand offset.
5183 * @param uFpuOpcode The FPU opcode value.
5184 */
5185void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5186{
5187 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5188 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5189 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5190 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5191}
5192
5193
5194/**
5195 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
5196 *
5197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5198 * @param u16FSW The FSW from the current instruction.
5199 * @param uFpuOpcode The FPU opcode value.
5200 */
5201void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT
5202{
5203 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5204 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5205 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5206 iemFpuMaybePopOne(pFpuCtx);
5207 iemFpuMaybePopOne(pFpuCtx);
5208}
5209
5210
5211/**
5212 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
5213 *
5214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5215 * @param u16FSW The FSW from the current instruction.
5216 * @param iEffSeg The effective memory operand selector register.
5217 * @param GCPtrEff The effective memory operand offset.
5218 * @param uFpuOpcode The FPU opcode value.
5219 */
5220void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5221 uint16_t uFpuOpcode) RT_NOEXCEPT
5222{
5223 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5224 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5225 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5226 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
5227 iemFpuMaybePopOne(pFpuCtx);
5228}
5229
5230
5231/**
5232 * Worker routine for raising an FPU stack underflow exception.
5233 *
5234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5235 * @param pFpuCtx The FPU context.
5236 * @param iStReg The stack register being accessed.
5237 */
5238static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
5239{
5240 Assert(iStReg < 8 || iStReg == UINT8_MAX);
5241 if (pFpuCtx->FCW & X86_FCW_IM)
5242 {
5243 /* Masked underflow. */
5244 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5245 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5246 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
5247 if (iStReg != UINT8_MAX)
5248 {
5249 pFpuCtx->FTW |= RT_BIT(iReg);
5250 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
5251 }
5252 }
5253 else
5254 {
5255 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5256 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5257 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
5258 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5259 }
5260 RT_NOREF(pVCpu);
5261}
5262
5263
5264/**
5265 * Raises a FPU stack underflow exception.
5266 *
5267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5268 * @param iStReg The destination register that should be loaded
5269 * with QNaN if \#IS is not masked. Specify
5270 * UINT8_MAX if none (like for fcom).
5271 * @param uFpuOpcode The FPU opcode value.
5272 */
5273void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5274{
5275 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5276 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5277 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5278}
5279
5280
5281void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5282{
5283 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5284 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5285 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5286 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5287}
5288
5289
5290void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT
5291{
5292 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5293 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5294 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5295 iemFpuMaybePopOne(pFpuCtx);
5296}
5297
5298
5299void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff,
5300 uint16_t uFpuOpcode) RT_NOEXCEPT
5301{
5302 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5303 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5304 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5305 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5306 iemFpuMaybePopOne(pFpuCtx);
5307}
5308
5309
5310void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5311{
5312 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5313 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5314 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5315 iemFpuMaybePopOne(pFpuCtx);
5316 iemFpuMaybePopOne(pFpuCtx);
5317}
5318
5319
5320void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5321{
5322 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5323 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5324
5325 if (pFpuCtx->FCW & X86_FCW_IM)
5326 {
5327        /* Masked underflow - Push QNaN. */
5328 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5329 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5330 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5331 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5332 pFpuCtx->FTW |= RT_BIT(iNewTop);
5333 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5334 iemFpuRotateStackPush(pFpuCtx);
5335 }
5336 else
5337 {
5338 /* Exception pending - don't change TOP or the register stack. */
5339 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5340 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5341 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5342 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5343 }
5344}
5345
5346
5347void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5348{
5349 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5350 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5351
5352 if (pFpuCtx->FCW & X86_FCW_IM)
5353 {
5354        /* Masked underflow - Push QNaN. */
5355 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5356 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5357 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5358 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5359 pFpuCtx->FTW |= RT_BIT(iNewTop);
5360 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5361 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5362 iemFpuRotateStackPush(pFpuCtx);
5363 }
5364 else
5365 {
5366 /* Exception pending - don't change TOP or the register stack. */
5367 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5368 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5369 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5370 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5371 }
5372}
5373
5374
5375/**
5376 * Worker routine for raising an FPU stack overflow exception on a push.
5377 *
5378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5379 * @param pFpuCtx The FPU context.
5380 */
5381static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5382{
5383 if (pFpuCtx->FCW & X86_FCW_IM)
5384 {
5385 /* Masked overflow. */
5386 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5387 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5388 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5389 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5390 pFpuCtx->FTW |= RT_BIT(iNewTop);
5391 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5392 iemFpuRotateStackPush(pFpuCtx);
5393 }
5394 else
5395 {
5396 /* Exception pending - don't change TOP or the register stack. */
5397 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5398 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5399 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5400 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5401 }
5402 RT_NOREF(pVCpu);
5403}
5404
5405
5406/**
5407 * Raises a FPU stack overflow exception on a push.
5408 *
5409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5410 * @param uFpuOpcode The FPU opcode value.
5411 */
5412void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT
5413{
5414 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5415 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5416 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5417}
5418
5419
5420/**
5421 * Raises a FPU stack overflow exception on a push with a memory operand.
5422 *
5423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5424 * @param iEffSeg The effective memory operand selector register.
5425 * @param GCPtrEff The effective memory operand offset.
5426 * @param uFpuOpcode The FPU opcode value.
5427 */
5428void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT
5429{
5430 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5431 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5432 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
5433 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5434}
5435
5436/** @} */
5437
5438
5439/** @name SSE+AVX SIMD access and helpers.
5440 *
5441 * @{
5442 */
5443/**
5444 * Stores a result in a SIMD XMM register, updates the MXCSR.
5445 *
5446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5447 * @param pResult The result to store.
5448 * @param iXmmReg Which SIMD XMM register to store the result in.
5449 */
5450void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5451{
5452 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5453 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5454
5455 /* The result is only updated if there is no unmasked exception pending. */
5456 if (( ~((pFpuCtx->MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)
5457 & (pFpuCtx->MXCSR & X86_MXCSR_XCPT_FLAGS)) == 0)
5458 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5459}
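
/*
 * Rough example of the gating done above: the exception flags occupy the low
 * bits of MXCSR and the corresponding mask bits sit X86_MXCSR_XCPT_MASK_SHIFT
 * bits higher, so the expression checks for any flag whose mask bit is clear.
 * With the power-up default MXCSR of 0x1F80 (all exceptions masked) the XMM
 * register is always updated; if, say, the invalid-operation mask is cleared
 * and the result sets IE, the flag is still accumulated but the destination
 * register is left untouched.
 */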
5460
5461
5462/**
5463 * Updates the MXCSR.
5464 *
5465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5466 * @param fMxcsr The new MXCSR value.
5467 */
5468void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT
5469{
5470 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5471 pFpuCtx->MXCSR |= fMxcsr & X86_MXCSR_XCPT_FLAGS;
5472}
5473/** @} */
5474
5475
5476/** @name Memory access.
5477 *
5478 * @{
5479 */
5480
5481#undef LOG_GROUP
5482#define LOG_GROUP LOG_GROUP_IEM_MEM
5483
5484/**
5485 * Updates the IEMCPU::cbWritten counter if applicable.
5486 *
5487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5488 * @param fAccess The access being accounted for.
5489 * @param cbMem The access size.
5490 */
5491DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5492{
5493 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5494 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5495 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5496}
5497
5498
5499/**
5500 * Applies the segment limit, base and attributes.
5501 *
5502 * This may raise a \#GP or \#SS.
5503 *
5504 * @returns VBox strict status code.
5505 *
5506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5507 * @param fAccess The kind of access which is being performed.
5508 * @param iSegReg The index of the segment register to apply.
5509 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5510 * TSS, ++).
5511 * @param cbMem The access size.
5512 * @param pGCPtrMem Pointer to the guest memory address to apply
5513 * segmentation to. Input and output parameter.
5514 */
5515VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5516{
5517 if (iSegReg == UINT8_MAX)
5518 return VINF_SUCCESS;
5519
5520 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5521 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5522 switch (IEM_GET_CPU_MODE(pVCpu))
5523 {
5524 case IEMMODE_16BIT:
5525 case IEMMODE_32BIT:
5526 {
5527 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5528 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5529
5530 if ( pSel->Attr.n.u1Present
5531 && !pSel->Attr.n.u1Unusable)
5532 {
5533 Assert(pSel->Attr.n.u1DescType);
5534 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5535 {
5536 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5537 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5538 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5539
5540 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5541 {
5542 /** @todo CPL check. */
5543 }
5544
5545 /*
5546 * There are two kinds of data selectors, normal and expand down.
5547 */
5548 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5549 {
5550 if ( GCPtrFirst32 > pSel->u32Limit
5551 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5552 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5553 }
5554 else
5555 {
5556 /*
5557 * The upper boundary is defined by the B bit, not the G bit!
5558 */
5559 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5560 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5561 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5562 }
5563 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5564 }
5565 else
5566 {
5567 /*
5568             * Code selectors can usually be used to read through; writing is
5569             * only permitted in real and V8086 mode.
5570 */
5571 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5572 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5573 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5574 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5575 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5576
5577 if ( GCPtrFirst32 > pSel->u32Limit
5578 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5579 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5580
5581 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5582 {
5583 /** @todo CPL check. */
5584 }
5585
5586 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5587 }
5588 }
5589 else
5590 return iemRaiseGeneralProtectionFault0(pVCpu);
5591 return VINF_SUCCESS;
5592 }
5593
5594 case IEMMODE_64BIT:
5595 {
5596 RTGCPTR GCPtrMem = *pGCPtrMem;
5597 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5598 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5599
5600 Assert(cbMem >= 1);
5601 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5602 return VINF_SUCCESS;
5603 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5604 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5605 return iemRaiseGeneralProtectionFault0(pVCpu);
5606 }
5607
5608 default:
5609 AssertFailedReturn(VERR_IEM_IPE_7);
5610 }
5611}
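
/*
 * Informal example of the expand-down case handled above: for an expand-down
 * data segment the valid offsets lie strictly above the limit, up to 0xffff
 * or 0xffffffff depending on the B (default-big) bit.  With limit=0x0fff and
 * B=1, an access at offset 0x0800 raises the selector-bounds fault while an
 * access at 0x1000 passes and gets the segment base added.  In 64-bit mode
 * only FS and GS bases are applied and the sole check is that the range is
 * canonical.
 */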
5612
5613
5614/**
5615 * Translates a virtual address to a physical address and checks if we
5616 * can access the page as specified.
5617 *
5618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5619 * @param GCPtrMem The virtual address.
5620 * @param cbAccess The access size, for raising \#PF correctly for
5621 * FXSAVE and such.
5622 * @param fAccess The intended access.
5623 * @param pGCPhysMem Where to return the physical address.
5624 */
5625VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess,
5626 uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5627{
5628 /** @todo Need a different PGM interface here. We're currently using
5629     *        generic / REM interfaces; this won't cut it for R0. */
5630 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5631 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5632 * here. */
5633 PGMPTWALK Walk;
5634 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5635 if (RT_FAILURE(rc))
5636 {
5637 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5638 /** @todo Check unassigned memory in unpaged mode. */
5639 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5640#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5641 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5642 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5643#endif
5644 *pGCPhysMem = NIL_RTGCPHYS;
5645 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, rc);
5646 }
5647
5648 /* If the page is writable and does not have the no-exec bit set, all
5649 access is allowed. Otherwise we'll have to check more carefully... */
5650 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5651 {
5652 /* Write to read only memory? */
5653 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5654 && !(Walk.fEffective & X86_PTE_RW)
5655 && ( ( IEM_GET_CPL(pVCpu) == 3
5656 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5657 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5658 {
5659 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5660 *pGCPhysMem = NIL_RTGCPHYS;
5661#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5662 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5663 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5664#endif
5665 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5666 }
5667
5668 /* Kernel memory accessed by userland? */
5669 if ( !(Walk.fEffective & X86_PTE_US)
5670 && IEM_GET_CPL(pVCpu) == 3
5671 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5672 {
5673 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5674 *pGCPhysMem = NIL_RTGCPHYS;
5675#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5676 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5677 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5678#endif
5679 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess, VERR_ACCESS_DENIED);
5680 }
5681
5682 /* Executing non-executable memory? */
5683 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5684 && (Walk.fEffective & X86_PTE_PAE_NX)
5685 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5686 {
5687 LogEx(LOG_GROUP_IEM,("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5688 *pGCPhysMem = NIL_RTGCPHYS;
5689#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5690 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5691 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5692#endif
5693 return iemRaisePageFault(pVCpu, GCPtrMem, cbAccess, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5694 VERR_ACCESS_DENIED);
5695 }
5696 }
5697
5698 /*
5699 * Set the dirty / access flags.
5700     * ASSUMES this is set when the address is translated rather than on commit...
5701 */
5702 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5703 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
5704 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5705 {
5706 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5707 AssertRC(rc2);
5708 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5709 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5710 }
5711
5712 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5713 *pGCPhysMem = GCPhys;
5714 return VINF_SUCCESS;
5715}
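
/*
 * Summary example for the checks above: a write to a present read-only page
 * faults if it is made at CPL 3 (and is not a system access) or if CR0.WP is
 * set; a CPL-3 access to a supervisor (non-U/S) page and an instruction
 * fetch from an NX page with EFER.NXE set fault as well, all with
 * VERR_ACCESS_DENIED as the inner status.  Only when the access is allowed
 * are the accessed and, for writes, dirty bits set, that is, at translation
 * time rather than at commit.
 */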
5716
5717
5718/**
5719 * Looks up a memory mapping entry.
5720 *
5721 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5723 * @param pvMem The memory address.
5724 * @param   fAccess             The access type and origin to match (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
5725 */
5726DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5727{
5728 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5729 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5730 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5731 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5732 return 0;
5733 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5734 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5735 return 1;
5736 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5737 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5738 return 2;
5739 return VERR_NOT_FOUND;
5740}
5741
5742
5743/**
5744 * Finds a free memmap entry when using iNextMapping doesn't work.
5745 *
5746 * @returns Memory mapping index, 1024 on failure.
5747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5748 */
5749static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5750{
5751 /*
5752 * The easy case.
5753 */
5754 if (pVCpu->iem.s.cActiveMappings == 0)
5755 {
5756 pVCpu->iem.s.iNextMapping = 1;
5757 return 0;
5758 }
5759
5760 /* There should be enough mappings for all instructions. */
5761 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5762
5763 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5764 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5765 return i;
5766
5767 AssertFailedReturn(1024);
5768}
5769
5770
5771/**
5772 * Commits a bounce buffer that needs writing back and unmaps it.
5773 *
5774 * @returns Strict VBox status code.
5775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5776 * @param iMemMap The index of the buffer to commit.
5777 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5778 * Always false in ring-3, obviously.
5779 */
5780static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5781{
5782 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5783 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5784#ifdef IN_RING3
5785 Assert(!fPostponeFail);
5786 RT_NOREF_PV(fPostponeFail);
5787#endif
5788
5789 /*
5790 * Do the writing.
5791 */
5792 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5793 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5794 {
5795 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5796 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5797 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5798 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
5799 {
5800 /*
5801 * Carefully and efficiently dealing with access handler return
5802             * codes makes this a little bloated.
5803 */
5804 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5805 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5806 pbBuf,
5807 cbFirst,
5808 PGMACCESSORIGIN_IEM);
5809 if (rcStrict == VINF_SUCCESS)
5810 {
5811 if (cbSecond)
5812 {
5813 rcStrict = PGMPhysWrite(pVM,
5814 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5815 pbBuf + cbFirst,
5816 cbSecond,
5817 PGMACCESSORIGIN_IEM);
5818 if (rcStrict == VINF_SUCCESS)
5819 { /* nothing */ }
5820 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5821 {
5822 LogEx(LOG_GROUP_IEM,
5823 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5824 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5825 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5826 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5827 }
5828#ifndef IN_RING3
5829 else if (fPostponeFail)
5830 {
5831 LogEx(LOG_GROUP_IEM,
5832 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5833 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5834 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5835 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5836 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5837 return iemSetPassUpStatus(pVCpu, rcStrict);
5838 }
5839#endif
5840 else
5841 {
5842 LogEx(LOG_GROUP_IEM,
5843 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5844 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5845 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5846 return rcStrict;
5847 }
5848 }
5849 }
5850 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5851 {
5852 if (!cbSecond)
5853 {
5854 LogEx(LOG_GROUP_IEM,
5855 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5856 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5857 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5858 }
5859 else
5860 {
5861 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5862 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5863 pbBuf + cbFirst,
5864 cbSecond,
5865 PGMACCESSORIGIN_IEM);
5866 if (rcStrict2 == VINF_SUCCESS)
5867 {
5868 LogEx(LOG_GROUP_IEM,
5869 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5870 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5871 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5872 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5873 }
5874 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5875 {
5876 LogEx(LOG_GROUP_IEM,
5877 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5878 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5879 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5880 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5881 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5882 }
5883#ifndef IN_RING3
5884 else if (fPostponeFail)
5885 {
5886 LogEx(LOG_GROUP_IEM,
5887 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5888 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5889 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5890 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5891 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5892 return iemSetPassUpStatus(pVCpu, rcStrict);
5893 }
5894#endif
5895 else
5896 {
5897 LogEx(LOG_GROUP_IEM,
5898 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5899 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5900 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5901 return rcStrict2;
5902 }
5903 }
5904 }
5905#ifndef IN_RING3
5906 else if (fPostponeFail)
5907 {
5908 LogEx(LOG_GROUP_IEM,
5909 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5910 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5911 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5912 if (!cbSecond)
5913 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5914 else
5915 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5916 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5917 return iemSetPassUpStatus(pVCpu, rcStrict);
5918 }
5919#endif
5920 else
5921 {
5922 LogEx(LOG_GROUP_IEM,
5923 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5924 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5925 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5926 return rcStrict;
5927 }
5928 }
5929 else
5930 {
5931 /*
5932 * No access handlers, much simpler.
5933 */
5934 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5935 if (RT_SUCCESS(rc))
5936 {
5937 if (cbSecond)
5938 {
5939 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5940 if (RT_SUCCESS(rc))
5941 { /* likely */ }
5942 else
5943 {
5944 LogEx(LOG_GROUP_IEM,
5945 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5946 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5947 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5948 return rc;
5949 }
5950 }
5951 }
5952 else
5953 {
5954 LogEx(LOG_GROUP_IEM,
5955 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5956 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5957 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5958 return rc;
5959 }
5960 }
5961 }
5962
5963#if defined(IEM_LOG_MEMORY_WRITES)
5964 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5965 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5966 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5967 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5968 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5969 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5970
5971 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5972 g_cbIemWrote = cbWrote;
5973 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5974#endif
5975
5976 /*
5977 * Free the mapping entry.
5978 */
5979 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5980 Assert(pVCpu->iem.s.cActiveMappings != 0);
5981 pVCpu->iem.s.cActiveMappings--;
5982 return VINF_SUCCESS;
5983}
5984
5985
5986/**
5987 * iemMemMap worker that deals with a request crossing pages.
5988 */
5989static VBOXSTRICTRC
5990iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5991{
5992 Assert(cbMem <= GUEST_PAGE_SIZE);
5993
5994 /*
5995 * Do the address translations.
5996 */
5997 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
5998 RTGCPHYS GCPhysFirst;
5999 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
6000 if (rcStrict != VINF_SUCCESS)
6001 return rcStrict;
6002 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
6003
6004 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
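    /* Worked example (illustrative): with 4 KiB guest pages, a GCPtrFirst ending in
       0xffc together with cbMem = 8 gives cbFirstPage = 4 and cbSecondPage = 4,
       i.e. the access is split evenly across the page boundary. */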
6005 RTGCPHYS GCPhysSecond;
6006 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
6007 cbSecondPage, fAccess, &GCPhysSecond);
6008 if (rcStrict != VINF_SUCCESS)
6009 return rcStrict;
6010 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
6011 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
6012
6013 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6014
6015 /*
6016 * Read in the current memory content if it's a read, execute or partial
6017 * write access.
6018 */
6019 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6020
6021 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6022 {
6023 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6024 {
6025 /*
6026 * Must carefully deal with access handler status codes here,
6027 * which makes the code a bit bloated.
6028 */
6029 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
6030 if (rcStrict == VINF_SUCCESS)
6031 {
6032 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6033 if (rcStrict == VINF_SUCCESS)
6034 { /*likely */ }
6035 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6036 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6037 else
6038 {
6039 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
6040 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
6041 return rcStrict;
6042 }
6043 }
6044 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6045 {
6046 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
6047 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
6048 {
6049 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
6050 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6051 }
6052 else
6053 {
6054 LogEx(LOG_GROUP_IEM,
6055 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
6056 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
6057 return rcStrict2;
6058 }
6059 }
6060 else
6061 {
6062 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6063 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6064 return rcStrict;
6065 }
6066 }
6067 else
6068 {
6069 /*
6070 * No informational status codes here, much more straightforward.
6071 */
6072 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
6073 if (RT_SUCCESS(rc))
6074 {
6075 Assert(rc == VINF_SUCCESS);
6076 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
6077 if (RT_SUCCESS(rc))
6078 Assert(rc == VINF_SUCCESS);
6079 else
6080 {
6081 LogEx(LOG_GROUP_IEM,
6082 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
6083 return rc;
6084 }
6085 }
6086 else
6087 {
6088 LogEx(LOG_GROUP_IEM,
6089 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
6090 return rc;
6091 }
6092 }
6093 }
6094#ifdef VBOX_STRICT
6095 else
6096 memset(pbBuf, 0xcc, cbMem);
6097 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6098 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6099#endif
6100 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
6101
6102 /*
6103 * Commit the bounce buffer entry.
6104 */
6105 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6106 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
6107 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
6108 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
6109 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
6110 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6111 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6112 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6113 pVCpu->iem.s.cActiveMappings++;
6114
6115 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6116 *ppvMem = pbBuf;
6117 return VINF_SUCCESS;
6118}
6119
6120
6121/**
6122 * iemMemMap worker that deals with iemMemPageMap failures.
6123 */
6124static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
6125 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
6126{
6127 /*
6128 * Filter out conditions we can handle and the ones which shouldn't happen.
6129 */
6130 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
6131 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
6132 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
6133 {
6134 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
6135 return rcMap;
6136 }
6137 pVCpu->iem.s.cPotentialExits++;
6138
6139 /*
6140 * Read in the current memory content if it's a read, execute or partial
6141 * write access.
6142 */
6143 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
6144 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
6145 {
6146 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
6147 memset(pbBuf, 0xff, cbMem);
6148 else
6149 {
6150 int rc;
6151 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
6152 {
6153 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
6154 if (rcStrict == VINF_SUCCESS)
6155 { /* nothing */ }
6156 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
6157 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6158 else
6159 {
6160 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
6161 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
6162 return rcStrict;
6163 }
6164 }
6165 else
6166 {
6167 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
6168 if (RT_SUCCESS(rc))
6169 { /* likely */ }
6170 else
6171 {
6172 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
6173 GCPhysFirst, rc));
6174 return rc;
6175 }
6176 }
6177 }
6178 }
6179#ifdef VBOX_STRICT
6180 else
6181 memset(pbBuf, 0xcc, cbMem);
6182#endif
6183#ifdef VBOX_STRICT
6184 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
6185 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
6186#endif
6187
6188 /*
6189 * Commit the bounce buffer entry.
6190 */
6191 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
6192 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
6193 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
6194 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
6195 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
6196 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
6197 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
6198 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6199 pVCpu->iem.s.cActiveMappings++;
6200
6201 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6202 *ppvMem = pbBuf;
6203 return VINF_SUCCESS;
6204}
6205
6206
6207
6208/**
6209 * Maps the specified guest memory for the given kind of access.
6210 *
6211 * This may use bounce buffering of the memory if the access crosses a page
6212 * boundary or if an access handler is installed for any part of it. Because
6213 * of lock prefix guarantees, we're in for some extra clutter when this
6214 * happens.
6215 *
6216 * This may raise a \#GP, \#SS, \#PF or \#AC.
6217 *
6218 * @returns VBox strict status code.
6219 *
6220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6221 * @param ppvMem Where to return the pointer to the mapped memory.
6222 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
6223 * 8, 12, 16, 32 or 512. When used by string operations
6224 * it can be up to a page.
6225 * @param iSegReg The index of the segment register to use for this
6226 * access. The base and limits are checked. Use UINT8_MAX
6227 * to indicate that no segmentation is required (for IDT,
6228 * GDT and LDT accesses).
6229 * @param GCPtrMem The address of the guest memory.
6230 * @param fAccess How the memory is being accessed. The
6231 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
6232 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
6233 * when raising exceptions.
6234 * @param uAlignCtl Alignment control:
6235 * - Bits 15:0 is the alignment mask.
6236 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6237 * IEM_MEMMAP_F_ALIGN_SSE, and
6238 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6239 * Pass zero to skip alignment.
6240 */
6241VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
6242 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
6243{
6244 /*
6245 * Check the input and figure out which mapping entry to use.
6246 */
6247 Assert(cbMem <= sizeof(pVCpu->iem.s.aBounceBuffers[0]));
6248 Assert( cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94
6249 || (iSegReg == UINT8_MAX && uAlignCtl == 0 && fAccess == IEM_ACCESS_DATA_R /* for the CPUID logging interface */) );
6250 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6251 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6252
6253 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6254 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6255 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6256 {
6257 iMemMap = iemMemMapFindFree(pVCpu);
6258 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6259 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6260 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6261 pVCpu->iem.s.aMemMappings[2].fAccess),
6262 VERR_IEM_IPE_9);
6263 }
6264
6265 /*
6266 * Map the memory, checking that we can actually access it. If something
6267 * slightly complicated happens, fall back on bounce buffering.
6268 */
6269 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6270 if (rcStrict == VINF_SUCCESS)
6271 { /* likely */ }
6272 else
6273 return rcStrict;
6274
6275 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
6276 { /* likely */ }
6277 else
6278 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
6279
6280 /*
6281 * Alignment check.
6282 */
6283 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6284 { /* likelyish */ }
6285 else
6286 {
6287 /* Misaligned access. */
6288 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6289 {
6290 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6291 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6292 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6293 {
6294 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6295
6296 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6297 return iemRaiseAlignmentCheckException(pVCpu);
6298 }
6299 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6300 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6301 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6302 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6303 * that's what FXSAVE does on a 10980xe. */
6304 && iemMemAreAlignmentChecksEnabled(pVCpu))
6305 return iemRaiseAlignmentCheckException(pVCpu);
6306 else
6307 return iemRaiseGeneralProtectionFault0(pVCpu);
6308 }
6309 }
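    /* Example (illustrative): a 16-byte SSE access typically passes
       (sizeof(RTUINT128U) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE
       as uAlignCtl (see iemMemFetchDataU128AlignedSse below), i.e. alignment
       mask 15 in the low word and the #GP/SSE policy flags in the high word. */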
6310
6311#ifdef IEM_WITH_DATA_TLB
6312 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6313
6314 /*
6315 * Get the TLB entry for this page.
6316 */
6317 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6318 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6319 if (pTlbe->uTag == uTag)
6320 {
6321# ifdef VBOX_WITH_STATISTICS
6322 pVCpu->iem.s.DataTlb.cTlbHits++;
6323# endif
6324 }
6325 else
6326 {
6327 pVCpu->iem.s.DataTlb.cTlbMisses++;
6328 PGMPTWALK Walk;
6329 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6330 if (RT_FAILURE(rc))
6331 {
6332 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6333# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6334 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6335 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6336# endif
6337 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6338 }
6339
6340 Assert(Walk.fSucceeded);
6341 pTlbe->uTag = uTag;
6342 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6343 pTlbe->GCPhys = Walk.GCPhys;
6344 pTlbe->pbMappingR3 = NULL;
6345 }
6346
6347 /*
6348 * Check TLB page table level access flags.
6349 */
6350 /* If the page is either supervisor only or non-writable, we need to do
6351 more careful access checks. */
6352 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6353 {
6354 /* Write to read only memory? */
6355 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6356 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6357 && ( ( IEM_GET_CPL(pVCpu) == 3
6358 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6359 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6360 {
6361 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6362# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6363 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6364 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6365# endif
6366 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6367 }
6368
6369 /* Kernel memory accessed by userland? */
6370 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6371 && IEM_GET_CPL(pVCpu) == 3
6372 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6373 {
6374 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6375# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6376 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6377 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6378# endif
6379 return iemRaisePageFault(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6380 }
6381 }
6382
6383 /*
6384 * Set the dirty / access flags.
6385 * ASSUMES this is set when the address is translated rather than on commit...
6386 */
6387 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6388 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6389 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6390 {
6391 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6392 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6393 AssertRC(rc2);
6394 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6395 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6396 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6397 }
6398
6399 /*
6400 * Look up the physical page info if necessary.
6401 */
6402 uint8_t *pbMem = NULL;
6403 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6404# ifdef IN_RING3
6405 pbMem = pTlbe->pbMappingR3;
6406# else
6407 pbMem = NULL;
6408# endif
6409 else
6410 {
6411 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6412 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6413 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6414 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6415 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6416 { /* likely */ }
6417 else
6418 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6419 pTlbe->pbMappingR3 = NULL;
6420 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6421 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6422 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6423 &pbMem, &pTlbe->fFlagsAndPhysRev);
6424 AssertRCReturn(rc, rc);
6425# ifdef IN_RING3
6426 pTlbe->pbMappingR3 = pbMem;
6427# endif
6428 }
6429
6430 /*
6431 * Check the physical page level access and mapping.
6432 */
6433 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6434 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6435 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6436 { /* probably likely */ }
6437 else
6438 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6439 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6440 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6441 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6442 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6443 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6444
6445 if (pbMem)
6446 {
6447 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6448 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6449 fAccess |= IEM_ACCESS_NOT_LOCKED;
6450 }
6451 else
6452 {
6453 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6454 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6455 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6456 if (rcStrict != VINF_SUCCESS)
6457 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6458 }
6459
6460 void * const pvMem = pbMem;
6461
6462 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6463 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6464 if (fAccess & IEM_ACCESS_TYPE_READ)
6465 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6466
6467#else /* !IEM_WITH_DATA_TLB */
6468
6469 RTGCPHYS GCPhysFirst;
6470 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6471 if (rcStrict != VINF_SUCCESS)
6472 return rcStrict;
6473
6474 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6475 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6476 if (fAccess & IEM_ACCESS_TYPE_READ)
6477 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6478
6479 void *pvMem;
6480 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6481 if (rcStrict != VINF_SUCCESS)
6482 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6483
6484#endif /* !IEM_WITH_DATA_TLB */
6485
6486 /*
6487 * Fill in the mapping table entry.
6488 */
6489 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6490 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6491 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6492 pVCpu->iem.s.cActiveMappings += 1;
6493
6494 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6495 *ppvMem = pvMem;
6496
6497 return VINF_SUCCESS;
6498}
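
/* Usage sketch (illustrative only, not part of the build): callers pair iemMemMap
 * with iemMemCommitAndUnmap, e.g. for a naturally aligned read-modify-write word
 * access.  GCPtrEff and fBitsToSet are placeholder names, and X86_SREG_DS is
 * assumed to be the usual segment register index constant:
 *
 *     uint16_t *pu16Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_DS,
 *                                       GCPtrEff, IEM_ACCESS_DATA_RW, sizeof(*pu16Dst) - 1);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu16Dst |= fBitsToSet;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW);
 *     }
 */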
6499
6500
6501/**
6502 * Commits the guest memory if bounce buffered and unmaps it.
6503 *
6504 * @returns Strict VBox status code.
6505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6506 * @param pvMem The mapping.
6507 * @param fAccess The kind of access.
6508 */
6509VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6510{
6511 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6512 AssertReturn(iMemMap >= 0, iMemMap);
6513
6514 /* If it's bounce buffered, we may need to write back the buffer. */
6515 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6516 {
6517 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6518 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6519 }
6520 /* Otherwise unlock it. */
6521 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6522 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6523
6524 /* Free the entry. */
6525 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6526 Assert(pVCpu->iem.s.cActiveMappings != 0);
6527 pVCpu->iem.s.cActiveMappings--;
6528 return VINF_SUCCESS;
6529}
6530
6531#ifdef IEM_WITH_SETJMP
6532
6533/**
6534 * Maps the specified guest memory for the given kind of access, longjmp on
6535 * error.
6536 *
6537 * This may use bounce buffering of the memory if the access crosses a page
6538 * boundary or if an access handler is installed for any part of it. Because
6539 * of lock prefix guarantees, we're in for some extra clutter when this
6540 * happens.
6541 *
6542 * This may raise a \#GP, \#SS, \#PF or \#AC.
6543 *
6544 * @returns Pointer to the mapped memory.
6545 *
6546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6547 * @param cbMem The number of bytes to map. This is usually 1,
6548 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6549 * string operations it can be up to a page.
6550 * @param iSegReg The index of the segment register to use for
6551 * this access. The base and limits are checked.
6552 * Use UINT8_MAX to indicate that no segmentation
6553 * is required (for IDT, GDT and LDT accesses).
6554 * @param GCPtrMem The address of the guest memory.
6555 * @param fAccess How the memory is being accessed. The
6556 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6557 * how to map the memory, while the
6558 * IEM_ACCESS_WHAT_XXX bit is used when raising
6559 * exceptions.
6560 * @param uAlignCtl Alignment control:
6561 * - Bits 15:0 is the alignment mask.
6562 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6563 * IEM_MEMMAP_F_ALIGN_SSE, and
6564 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6565 * Pass zero to skip alignment.
6566 */
6567void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6568 uint32_t uAlignCtl) IEM_NOEXCEPT_MAY_LONGJMP
6569{
6570 /*
6571 * Check the input, check segment access and adjust address
6572 * with segment base.
6573 */
6574 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6575 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6576 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6577
6578 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6579 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6580 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6581
6582 /*
6583 * Alignment check.
6584 */
6585 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6586 { /* likelyish */ }
6587 else
6588 {
6589 /* Misaligned access. */
6590 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6591 {
6592 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6593 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6594 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6595 {
6596 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6597
6598 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6599 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6600 }
6601 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6602 && (GCPtrMem & 3) /* The value 4 matches 10980xe's FXSAVE and helps make bs3-cpu-basic2 work. */
6603 /** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6604 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. Using 4 for now as
6605 * that's what FXSAVE does on a 10980xe. */
6606 && iemMemAreAlignmentChecksEnabled(pVCpu))
6607 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6608 else
6609 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6610 }
6611 }
6612
6613 /*
6614 * Figure out which mapping entry to use.
6615 */
6616 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6617 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6618 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6619 {
6620 iMemMap = iemMemMapFindFree(pVCpu);
6621 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6622 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6623 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6624 pVCpu->iem.s.aMemMappings[2].fAccess),
6625 IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_9));
6626 }
6627
6628 /*
6629 * Crossing a page boundary?
6630 */
6631 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6632 { /* No (likely). */ }
6633 else
6634 {
6635 void *pvMem;
6636 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6637 if (rcStrict == VINF_SUCCESS)
6638 return pvMem;
6639 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6640 }
6641
6642#ifdef IEM_WITH_DATA_TLB
6643 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6644
6645 /*
6646 * Get the TLB entry for this page.
6647 */
6648 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6649 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6650 if (pTlbe->uTag == uTag)
6651 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6652 else
6653 {
6654 pVCpu->iem.s.DataTlb.cTlbMisses++;
6655 PGMPTWALK Walk;
6656 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6657 if (RT_FAILURE(rc))
6658 {
6659 LogEx(LOG_GROUP_IEM, ("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6660# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6661 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6662 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6663# endif
6664 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, rc);
6665 }
6666
6667 Assert(Walk.fSucceeded);
6668 pTlbe->uTag = uTag;
6669 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6670 pTlbe->GCPhys = Walk.GCPhys;
6671 pTlbe->pbMappingR3 = NULL;
6672 }
6673
6674 /*
6675 * Check the flags and physical revision.
6676 */
6677 /** @todo make the caller pass these in with fAccess. */
6678 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && IEM_GET_CPL(pVCpu) == 3
6679 ? IEMTLBE_F_PT_NO_USER : 0;
6680 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6681 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6682 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6683 || (IEM_GET_CPL(pVCpu) == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6684 ? IEMTLBE_F_PT_NO_WRITE : 0)
6685 : 0;
6686 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6687 uint8_t *pbMem = NULL;
6688 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6689 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6690# ifdef IN_RING3
6691 pbMem = pTlbe->pbMappingR3;
6692# else
6693 pbMem = NULL;
6694# endif
6695 else
6696 {
6697 /*
6698 * Okay, something isn't quite right or needs refreshing.
6699 */
6700 /* Write to read only memory? */
6701 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6702 {
6703 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6704# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6705 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6706 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6707# endif
6708 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6709 }
6710
6711 /* Kernel memory accessed by userland? */
6712 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6713 {
6714 LogEx(LOG_GROUP_IEM, ("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6715# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6716 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6717 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6718# endif
6719 iemRaisePageFaultJmp(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, VERR_ACCESS_DENIED);
6720 }
6721
6722 /* Set the dirty / access flags.
6723 ASSUMES this is set when the address is translated rather than on commit... */
6724 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6725 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6726 {
6727 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6728 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6729 AssertRC(rc2);
6730 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6731 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6732 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6733 }
6734
6735 /*
6736 * Check if the physical page info needs updating.
6737 */
6738 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6739# ifdef IN_RING3
6740 pbMem = pTlbe->pbMappingR3;
6741# else
6742 pbMem = NULL;
6743# endif
6744 else
6745 {
6746 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6747 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6748 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6749 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6750 pTlbe->pbMappingR3 = NULL;
6751 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6752 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6753 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6754 &pbMem, &pTlbe->fFlagsAndPhysRev);
6755 AssertRCStmt(rc, IEM_DO_LONGJMP(pVCpu, rc));
6756# ifdef IN_RING3
6757 pTlbe->pbMappingR3 = pbMem;
6758# endif
6759 }
6760
6761 /*
6762 * Check the physical page level access and mapping.
6763 */
6764 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6765 { /* probably likely */ }
6766 else
6767 {
6768 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6769 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6770 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6771 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6772 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6773 if (rcStrict == VINF_SUCCESS)
6774 return pbMem;
6775 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6776 }
6777 }
6778 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6779
6780 if (pbMem)
6781 {
6782 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6783 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6784 fAccess |= IEM_ACCESS_NOT_LOCKED;
6785 }
6786 else
6787 {
6788 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6789 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6790 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6791 if (rcStrict == VINF_SUCCESS)
6792 return pbMem;
6793 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6794 }
6795
6796 void * const pvMem = pbMem;
6797
6798 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6799 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6800 if (fAccess & IEM_ACCESS_TYPE_READ)
6801 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6802
6803#else /* !IEM_WITH_DATA_TLB */
6804
6805
6806 RTGCPHYS GCPhysFirst;
6807 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, (uint32_t)cbMem, fAccess, &GCPhysFirst);
6808 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6809 else IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6810
6811 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6812 Log6(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6813 if (fAccess & IEM_ACCESS_TYPE_READ)
6814 Log2(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6815
6816 void *pvMem;
6817 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6818 if (rcStrict == VINF_SUCCESS)
6819 { /* likely */ }
6820 else
6821 {
6822 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6823 if (rcStrict == VINF_SUCCESS)
6824 return pvMem;
6825 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6826 }
6827
6828#endif /* !IEM_WITH_DATA_TLB */
6829
6830 /*
6831 * Fill in the mapping table entry.
6832 */
6833 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6834 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6835 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6836 pVCpu->iem.s.cActiveMappings++;
6837
6838 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6839 return pvMem;
6840}
6841
6842
6843/**
6844 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6845 *
6846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6847 * @param pvMem The mapping.
6848 * @param fAccess The kind of access.
6849 */
6850void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
6851{
6852 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6853 AssertStmt(iMemMap >= 0, IEM_DO_LONGJMP(pVCpu, iMemMap));
6854
6855 /* If it's bounce buffered, we may need to write back the buffer. */
6856 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6857 {
6858 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6859 {
6860 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6861 if (rcStrict == VINF_SUCCESS)
6862 return;
6863 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
6864 }
6865 }
6866 /* Otherwise unlock it. */
6867 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6868 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6869
6870 /* Free the entry. */
6871 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6872 Assert(pVCpu->iem.s.cActiveMappings != 0);
6873 pVCpu->iem.s.cActiveMappings--;
6874}
6875
6876
6877/** Fallback for iemMemCommitAndUnmapRwJmp. */
6878void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6879{
6880 Assert(bMapInfo == (1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4)) ); RT_NOREF_PV(bMapInfo);
6881 iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_RW);
6882}
6883
6884
6885/** Fallback for iemMemCommitAndUnmapWoJmp. */
6886void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6887{
6888 Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_WRITE << 4)) ); RT_NOREF_PV(bMapInfo);
6889 iemMemCommitAndUnmapJmp(pVCpu, pvMem, IEM_ACCESS_DATA_W);
6890}
6891
6892
6893/** Fallback for iemMemCommitAndUnmapRoJmp. */
6894void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
6895{
6896 Assert(bMapInfo == (1 | (IEM_ACCESS_TYPE_READ << 4)) ); RT_NOREF_PV(bMapInfo);
6897 iemMemCommitAndUnmapJmp(pVCpu, (void *)pvMem, IEM_ACCESS_DATA_R);
6898}
6899
6900#endif /* IEM_WITH_SETJMP */
6901
6902#ifndef IN_RING3
6903/**
6904 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6905 * buffer part runs into trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM etc.).
6906 *
6907 * Allows the instruction to be completed and retired, while the IEM user will
6908 * return to ring-3 immediately afterwards and do the postponed writes there.
6909 *
6910 * @returns VBox status code (no strict statuses). Caller must check
6911 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6913 * @param pvMem The mapping.
6914 * @param fAccess The kind of access.
6915 */
6916VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6917{
6918 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6919 AssertReturn(iMemMap >= 0, iMemMap);
6920
6921 /* If it's bounce buffered, we may need to write back the buffer. */
6922 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6923 {
6924 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6925 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6926 }
6927 /* Otherwise unlock it. */
6928 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6929 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6930
6931 /* Free the entry. */
6932 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6933 Assert(pVCpu->iem.s.cActiveMappings != 0);
6934 pVCpu->iem.s.cActiveMappings--;
6935 return VINF_SUCCESS;
6936}
6937#endif
6938
6939
6940/**
6941 * Rolls back mappings, releasing page locks and such.
6942 *
6943 * The caller shall only call this after checking cActiveMappings.
6944 *
6945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6946 */
6947void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6948{
6949 Assert(pVCpu->iem.s.cActiveMappings > 0);
6950
6951 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6952 while (iMemMap-- > 0)
6953 {
6954 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6955 if (fAccess != IEM_ACCESS_INVALID)
6956 {
6957 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6958 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6959 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6960 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6961 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6962 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6963 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6964 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6965 pVCpu->iem.s.cActiveMappings--;
6966 }
6967 }
6968}
6969
6970
6971/*
6972 * Instantiate R/W templates.
6973 */
6974#define TMPL_MEM_WITH_STACK
6975
6976#define TMPL_MEM_TYPE uint8_t
6977#define TMPL_MEM_FN_SUFF U8
6978#define TMPL_MEM_FMT_TYPE "%#04x"
6979#define TMPL_MEM_FMT_DESC "byte"
6980#include "IEMAllMemRWTmpl.cpp.h"
6981
6982#define TMPL_MEM_TYPE uint16_t
6983#define TMPL_MEM_FN_SUFF U16
6984#define TMPL_MEM_FMT_TYPE "%#06x"
6985#define TMPL_MEM_FMT_DESC "word"
6986#include "IEMAllMemRWTmpl.cpp.h"
6987
6988#define TMPL_WITH_PUSH_SREG
6989#define TMPL_MEM_TYPE uint32_t
6990#define TMPL_MEM_FN_SUFF U32
6991#define TMPL_MEM_FMT_TYPE "%#010x"
6992#define TMPL_MEM_FMT_DESC "dword"
6993#include "IEMAllMemRWTmpl.cpp.h"
6994#undef TMPL_WITH_PUSH_SREG
6995
6996#define TMPL_MEM_TYPE uint64_t
6997#define TMPL_MEM_FN_SUFF U64
6998#define TMPL_MEM_FMT_TYPE "%#018RX64"
6999#define TMPL_MEM_FMT_DESC "qword"
7000#include "IEMAllMemRWTmpl.cpp.h"
7001
7002#undef TMPL_MEM_WITH_STACK
7003
7004#define TMPL_MEM_TYPE uint64_t
7005#define TMPL_MEM_TYPE_ALIGN (sizeof(uint64_t) * 2 - 1)
7006#define TMPL_MEM_FN_SUFF U64AlignedU128
7007#define TMPL_MEM_FMT_TYPE "%#018RX64"
7008#define TMPL_MEM_FMT_DESC "qword"
7009#include "IEMAllMemRWTmpl.cpp.h"
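
/* Note (informal): the template instantiations above are expected to expand into
 * the fetch/store accessors for the listed integer widths, e.g. the
 * iemMemFetchDataU16/U32/U64 routines used by iemMemFetchDataXdtr further down,
 * plus the stack variants where TMPL_MEM_WITH_STACK is defined; see
 * IEMAllMemRWTmpl.cpp.h for the exact set that gets generated. */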
7010
7011
7012/**
7013 * Fetches a data dword and zero extends it to a qword.
7014 *
7015 * @returns Strict VBox status code.
7016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7017 * @param pu64Dst Where to return the qword.
7018 * @param iSegReg The index of the segment register to use for
7019 * this access. The base and limits are checked.
7020 * @param GCPtrMem The address of the guest memory.
7021 */
7022VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7023{
7024 /* The lazy approach for now... */
7025 uint32_t const *pu32Src;
7026 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
7027 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
7028 if (rc == VINF_SUCCESS)
7029 {
7030 *pu64Dst = *pu32Src;
7031 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
7032 Log(("IEM RD dword %d|%RGv: %#010RX64\n", iSegReg, GCPtrMem, *pu64Dst));
7033 }
7034 return rc;
7035}
7036
7037
7038#ifdef SOME_UNUSED_FUNCTION
7039/**
7040 * Fetches a data dword and sign extends it to a qword.
7041 *
7042 * @returns Strict VBox status code.
7043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7044 * @param pu64Dst Where to return the sign extended value.
7045 * @param iSegReg The index of the segment register to use for
7046 * this access. The base and limits are checked.
7047 * @param GCPtrMem The address of the guest memory.
7048 */
7049VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7050{
7051 /* The lazy approach for now... */
7052 int32_t const *pi32Src;
7053 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
7054 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
7055 if (rc == VINF_SUCCESS)
7056 {
7057 *pu64Dst = *pi32Src;
7058 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
7059 Log(("IEM RD dword %d|%RGv: %#010x\n", iSegReg, GCPtrMem, (uint32_t)*pu64Dst));
7060 }
7061#ifdef __GNUC__ /* warning: GCC may be a royal pain */
7062 else
7063 *pu64Dst = 0;
7064#endif
7065 return rc;
7066}
7067#endif
7068
7069
7070/**
7071 * Fetches a data tword.
7072 *
7073 * @returns Strict VBox status code.
7074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7075 * @param pr80Dst Where to return the tword.
7076 * @param iSegReg The index of the segment register to use for
7077 * this access. The base and limits are checked.
7078 * @param GCPtrMem The address of the guest memory.
7079 */
7080VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7081{
7082 /* The lazy approach for now... */
7083 PCRTFLOAT80U pr80Src;
7084 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7085 if (rc == VINF_SUCCESS)
7086 {
7087 *pr80Dst = *pr80Src;
7088 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7089 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7090 }
7091 return rc;
7092}
7093
7094
7095#ifdef IEM_WITH_SETJMP
7096/**
7097 * Fetches a data tword, longjmp on error.
7098 *
7099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7100 * @param pr80Dst Where to return the tword.
7101 * @param iSegReg The index of the segment register to use for
7102 * this access. The base and limits are checked.
7103 * @param GCPtrMem The address of the guest memory.
7104 */
7105void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7106{
7107 /* The lazy approach for now... */
7108 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7109 *pr80Dst = *pr80Src;
7110 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7111 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pr80Dst));
7112}
7113#endif
7114
7115
7116/**
7117 * Fetches a data decimal tword.
7118 *
7119 * @returns Strict VBox status code.
7120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7121 * @param pd80Dst Where to return the tword.
7122 * @param iSegReg The index of the segment register to use for
7123 * this access. The base and limits are checked.
7124 * @param GCPtrMem The address of the guest memory.
7125 */
7126VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7127{
7128 /* The lazy approach for now... */
7129 PCRTPBCD80U pd80Src;
7130 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7131 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7132 if (rc == VINF_SUCCESS)
7133 {
7134 *pd80Dst = *pd80Src;
7135 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7136 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7137 }
7138 return rc;
7139}
7140
7141
7142#ifdef IEM_WITH_SETJMP
7143/**
7144 * Fetches a data decimal tword, longjmp on error.
7145 *
7146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7147 * @param pd80Dst Where to return the tword.
7148 * @param iSegReg The index of the segment register to use for
7149 * this access. The base and limits are checked.
7150 * @param GCPtrMem The address of the guest memory.
7151 */
7152void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7153{
7154 /* The lazy approach for now... */
7155 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7156 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7157 *pd80Dst = *pd80Src;
7158 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7159 Log(("IEM RD tword %d|%RGv: %.10Rhxs\n", iSegReg, GCPtrMem, pd80Dst));
7160}
7161#endif
7162
7163
7164/**
7165 * Fetches a data dqword (double qword), generally SSE related.
7166 *
7167 * @returns Strict VBox status code.
7168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7169 * @param pu128Dst Where to return the dqword.
7170 * @param iSegReg The index of the segment register to use for
7171 * this access. The base and limits are checked.
7172 * @param GCPtrMem The address of the guest memory.
7173 */
7174VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7175{
7176 /* The lazy approach for now... */
7177 PCRTUINT128U pu128Src;
7178 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7179 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7180 if (rc == VINF_SUCCESS)
7181 {
7182 pu128Dst->au64[0] = pu128Src->au64[0];
7183 pu128Dst->au64[1] = pu128Src->au64[1];
7184 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7185 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7186 }
7187 return rc;
7188}
7189
7190
7191#ifdef IEM_WITH_SETJMP
7192/**
7193 * Fetches a data dqword (double qword), generally SSE related.
7194 *
7195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7196 * @param pu128Dst Where to return the dqword.
7197 * @param iSegReg The index of the segment register to use for
7198 * this access. The base and limits are checked.
7199 * @param GCPtrMem The address of the guest memory.
7200 */
7201void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7202{
7203 /* The lazy approach for now... */
7204 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7205 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7206 pu128Dst->au64[0] = pu128Src->au64[0];
7207 pu128Dst->au64[1] = pu128Src->au64[1];
7208 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7209 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7210}
7211#endif
7212
7213
7214/**
7215 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7216 * related.
7217 *
7218 * Raises \#GP(0) if not aligned.
7219 *
7220 * @returns Strict VBox status code.
7221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7222 * @param pu128Dst Where to return the dqword.
7223 * @param iSegReg The index of the segment register to use for
7224 * this access. The base and limits are checked.
7225 * @param GCPtrMem The address of the guest memory.
7226 */
7227VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7228{
7229 /* The lazy approach for now... */
7230 PCRTUINT128U pu128Src;
7231 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7232 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7233 if (rc == VINF_SUCCESS)
7234 {
7235 pu128Dst->au64[0] = pu128Src->au64[0];
7236 pu128Dst->au64[1] = pu128Src->au64[1];
7237 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7238 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7239 }
7240 return rc;
7241}
7242
7243
7244#ifdef IEM_WITH_SETJMP
7245/**
7246 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7247 * related, longjmp on error.
7248 *
7249 * Raises \#GP(0) if not aligned.
7250 *
7251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7252 * @param pu128Dst Where to return the dqword.
7253 * @param iSegReg The index of the segment register to use for
7254 * this access. The base and limits are checked.
7255 * @param GCPtrMem The address of the guest memory.
7256 */
7257void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
7258 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7259{
7260 /* The lazy approach for now... */
7261 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7262 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7263 pu128Dst->au64[0] = pu128Src->au64[0];
7264 pu128Dst->au64[1] = pu128Src->au64[1];
7265 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7266 Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7267}
7268#endif
7269
7270
7271/**
7272 * Fetches a data oword (octo word), generally AVX related.
7273 *
7274 * @returns Strict VBox status code.
7275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7276 * @param pu256Dst Where to return the oword.
7277 * @param iSegReg The index of the segment register to use for
7278 * this access. The base and limits are checked.
7279 * @param GCPtrMem The address of the guest memory.
7280 */
7281VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7282{
7283 /* The lazy approach for now... */
7284 PCRTUINT256U pu256Src;
7285 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7286 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7287 if (rc == VINF_SUCCESS)
7288 {
7289 pu256Dst->au64[0] = pu256Src->au64[0];
7290 pu256Dst->au64[1] = pu256Src->au64[1];
7291 pu256Dst->au64[2] = pu256Src->au64[2];
7292 pu256Dst->au64[3] = pu256Src->au64[3];
7293 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7294 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7295 }
7296 return rc;
7297}
7298
7299
7300#ifdef IEM_WITH_SETJMP
7301/**
7302 * Fetches a data oword (octo word), generally AVX related.
7303 *
7304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7305 * @param pu256Dst Where to return the oword.
7306 * @param iSegReg The index of the segment register to use for
7307 * this access. The base and limits are checked.
7308 * @param GCPtrMem The address of the guest memory.
7309 */
7310void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7311{
7312 /* The lazy approach for now... */
7313 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7314 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7315 pu256Dst->au64[0] = pu256Src->au64[0];
7316 pu256Dst->au64[1] = pu256Src->au64[1];
7317 pu256Dst->au64[2] = pu256Src->au64[2];
7318 pu256Dst->au64[3] = pu256Src->au64[3];
7319 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7320 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7321}
7322#endif
7323
7324
7325/**
7326 * Fetches a data oword (octo word) at an aligned address, generally AVX
7327 * related.
7328 *
7329 * Raises \#GP(0) if not aligned.
7330 *
7331 * @returns Strict VBox status code.
7332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7333 * @param pu256Dst Where to return the oword.
7334 * @param iSegReg The index of the segment register to use for
7335 * this access. The base and limits are checked.
7336 * @param GCPtrMem The address of the guest memory.
7337 */
7338VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7339{
7340 /* The lazy approach for now... */
7341 PCRTUINT256U pu256Src;
7342 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7343 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7344 if (rc == VINF_SUCCESS)
7345 {
7346 pu256Dst->au64[0] = pu256Src->au64[0];
7347 pu256Dst->au64[1] = pu256Src->au64[1];
7348 pu256Dst->au64[2] = pu256Src->au64[2];
7349 pu256Dst->au64[3] = pu256Src->au64[3];
7350 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7351 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7352 }
7353 return rc;
7354}
7355
7356
7357#ifdef IEM_WITH_SETJMP
7358/**
7359 * Fetches a data oword (octo word) at an aligned address, generally AVX
7360 * related, longjmp on error.
7361 *
7362 * Raises \#GP(0) if not aligned.
7363 *
7364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7365 * @param pu256Dst Where to return the oword.
7366 * @param iSegReg The index of the segment register to use for
7367 * this access. The base and limits are checked.
7368 * @param GCPtrMem The address of the guest memory.
7369 */
7370void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg,
7371 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
7372{
7373 /* The lazy approach for now... */
7374 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7375 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7376 pu256Dst->au64[0] = pu256Src->au64[0];
7377 pu256Dst->au64[1] = pu256Src->au64[1];
7378 pu256Dst->au64[2] = pu256Src->au64[2];
7379 pu256Dst->au64[3] = pu256Src->au64[3];
7380 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7381 Log(("IEM RD qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7382}
7383#endif
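
/*
 * Minimal usage sketch for the U256 fetch helpers above, assuming a caller
 * that has already calculated GCPtrEffSrc; the local names uSrc and rcStrict
 * are illustrative only, the function signature is taken from above.
 *
 *      RTUINT256U   uSrc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU256AlignedSse(pVCpu, &uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                    // #GP(0) on misalignment, #PF, #SS, ...
 *      // ... copy uSrc.au64[0..3] into the destination register ...
 */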
7384
7385
7386
7387/**
7388 * Fetches a descriptor register (lgdt, lidt).
7389 *
7390 * @returns Strict VBox status code.
7391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7392 * @param pcbLimit Where to return the limit.
7393 * @param pGCPtrBase Where to return the base.
7394 * @param iSegReg The index of the segment register to use for
7395 * this access. The base and limits are checked.
7396 * @param GCPtrMem The address of the guest memory.
7397 * @param enmOpSize The effective operand size.
7398 */
7399VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7400 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7401{
7402 /*
7403 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7404 * little special:
7405 * - The two reads are done separately.
7406     *  - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7407 * - We suspect the 386 to actually commit the limit before the base in
7408 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7409     *    don't try to emulate this eccentric behavior, because it's not well
7410 * enough understood and rather hard to trigger.
7411 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7412 */
7413 VBOXSTRICTRC rcStrict;
7414 if (IEM_IS_64BIT_CODE(pVCpu))
7415 {
7416 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7417 if (rcStrict == VINF_SUCCESS)
7418 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7419 }
7420 else
7421 {
7422 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
7423 if (enmOpSize == IEMMODE_32BIT)
7424 {
7425 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7426 {
7427 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7428 if (rcStrict == VINF_SUCCESS)
7429 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7430 }
7431 else
7432 {
7433 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7434 if (rcStrict == VINF_SUCCESS)
7435 {
7436 *pcbLimit = (uint16_t)uTmp;
7437 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7438 }
7439 }
7440 if (rcStrict == VINF_SUCCESS)
7441 *pGCPtrBase = uTmp;
7442 }
7443 else
7444 {
7445 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7446 if (rcStrict == VINF_SUCCESS)
7447 {
7448 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7449 if (rcStrict == VINF_SUCCESS)
7450 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7451 }
7452 }
7453 }
7454 return rcStrict;
7455}
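
/*
 * Worked example for iemMemFetchDataXdtr above (illustrative values): with
 * the bytes FF 03 78 56 34 12 at GCPtrMem, the limit read at +0 is 0x03ff in
 * all cases, while the base read at +2 depends on the mode and operand size:
 *
 *      32-bit operand size:    *pGCPtrBase = 0x12345678
 *      16-bit operand size:    *pGCPtrBase = 0x12345678 & 0x00ffffff = 0x00345678
 *      64-bit code:            a full 64-bit base is read at +2 instead.
 */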
7456
7457
7458/**
7459 * Stores a data dqword.
7460 *
7461 * @returns Strict VBox status code.
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 * @param iSegReg The index of the segment register to use for
7464 * this access. The base and limits are checked.
7465 * @param GCPtrMem The address of the guest memory.
7466 * @param u128Value The value to store.
7467 */
7468VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7469{
7470 /* The lazy approach for now... */
7471 PRTUINT128U pu128Dst;
7472 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7473 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7474 if (rc == VINF_SUCCESS)
7475 {
7476 pu128Dst->au64[0] = u128Value.au64[0];
7477 pu128Dst->au64[1] = u128Value.au64[1];
7478 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7479 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7480 }
7481 return rc;
7482}
7483
7484
7485#ifdef IEM_WITH_SETJMP
7486/**
7487 * Stores a data dqword, longjmp on error.
7488 *
7489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7490 * @param iSegReg The index of the segment register to use for
7491 * this access. The base and limits are checked.
7492 * @param GCPtrMem The address of the guest memory.
7493 * @param u128Value The value to store.
7494 */
7495void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7496{
7497 /* The lazy approach for now... */
7498 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7499 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7500 pu128Dst->au64[0] = u128Value.au64[0];
7501 pu128Dst->au64[1] = u128Value.au64[1];
7502 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7503 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7504}
7505#endif
7506
7507
7508/**
7509 * Stores a data dqword, SSE aligned.
7510 *
7511 * @returns Strict VBox status code.
7512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7513 * @param iSegReg The index of the segment register to use for
7514 * this access. The base and limits are checked.
7515 * @param GCPtrMem The address of the guest memory.
7516 * @param u128Value The value to store.
7517 */
7518VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7519{
7520 /* The lazy approach for now... */
7521 PRTUINT128U pu128Dst;
7522 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7523 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7524 if (rc == VINF_SUCCESS)
7525 {
7526 pu128Dst->au64[0] = u128Value.au64[0];
7527 pu128Dst->au64[1] = u128Value.au64[1];
7528 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7529 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7530 }
7531 return rc;
7532}
7533
7534
7535#ifdef IEM_WITH_SETJMP
7536/**
7537 * Stores a data dqword, SSE aligned, longjmp on error.
7538 *
7540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7541 * @param iSegReg The index of the segment register to use for
7542 * this access. The base and limits are checked.
7543 * @param GCPtrMem The address of the guest memory.
7544 * @param u128Value The value to store.
7545 */
7546void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7547 RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP
7548{
7549 /* The lazy approach for now... */
7550 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7551 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7552 pu128Dst->au64[0] = u128Value.au64[0];
7553 pu128Dst->au64[1] = u128Value.au64[1];
7554 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7555 Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
7556}
7557#endif
7558
7559
7560/**
7561 * Stores a data qqword.
7562 *
7563 * @returns Strict VBox status code.
7564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7565 * @param iSegReg The index of the segment register to use for
7566 * this access. The base and limits are checked.
7567 * @param GCPtrMem The address of the guest memory.
7568 * @param pu256Value Pointer to the value to store.
7569 */
7570VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7571{
7572 /* The lazy approach for now... */
7573 PRTUINT256U pu256Dst;
7574 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7575 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7576 if (rc == VINF_SUCCESS)
7577 {
7578 pu256Dst->au64[0] = pu256Value->au64[0];
7579 pu256Dst->au64[1] = pu256Value->au64[1];
7580 pu256Dst->au64[2] = pu256Value->au64[2];
7581 pu256Dst->au64[3] = pu256Value->au64[3];
7582 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7583 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7584 }
7585 return rc;
7586}
7587
7588
7589#ifdef IEM_WITH_SETJMP
7590/**
7591 * Stores a data qqword, longjmp on error.
7592 *
7593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7594 * @param iSegReg The index of the segment register to use for
7595 * this access. The base and limits are checked.
7596 * @param GCPtrMem The address of the guest memory.
7597 * @param pu256Value Pointer to the value to store.
7598 */
7599void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7600{
7601 /* The lazy approach for now... */
7602 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7603 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7604 pu256Dst->au64[0] = pu256Value->au64[0];
7605 pu256Dst->au64[1] = pu256Value->au64[1];
7606 pu256Dst->au64[2] = pu256Value->au64[2];
7607 pu256Dst->au64[3] = pu256Value->au64[3];
7608 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7609 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7610}
7611#endif
7612
7613
7614/**
7615 * Stores a data qqword, AVX \#GP(0) aligned.
7616 *
7617 * @returns Strict VBox status code.
7618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7619 * @param iSegReg The index of the segment register to use for
7620 * this access. The base and limits are checked.
7621 * @param GCPtrMem The address of the guest memory.
7622 * @param pu256Value Pointer to the value to store.
7623 */
7624VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7625{
7626 /* The lazy approach for now... */
7627 PRTUINT256U pu256Dst;
7628 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7629 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7630 if (rc == VINF_SUCCESS)
7631 {
7632 pu256Dst->au64[0] = pu256Value->au64[0];
7633 pu256Dst->au64[1] = pu256Value->au64[1];
7634 pu256Dst->au64[2] = pu256Value->au64[2];
7635 pu256Dst->au64[3] = pu256Value->au64[3];
7636 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7637 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7638 }
7639 return rc;
7640}
7641
7642
7643#ifdef IEM_WITH_SETJMP
7644/**
7645 * Stores a data qqword, AVX \#GP(0) aligned, longjmp on error.
7646 *
7648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7649 * @param iSegReg The index of the segment register to use for
7650 * this access. The base and limits are checked.
7651 * @param GCPtrMem The address of the guest memory.
7652 * @param pu256Value Pointer to the value to store.
7653 */
7654void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
7655 PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP
7656{
7657 /* The lazy approach for now... */
7658 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7659 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7660 pu256Dst->au64[0] = pu256Value->au64[0];
7661 pu256Dst->au64[1] = pu256Value->au64[1];
7662 pu256Dst->au64[2] = pu256Value->au64[2];
7663 pu256Dst->au64[3] = pu256Value->au64[3];
7664 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7665 Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
7666}
7667#endif
7668
7669
7670/**
7671 * Stores a descriptor register (sgdt, sidt).
7672 *
7673 * @returns Strict VBox status code.
7674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7675 * @param cbLimit The limit.
7676 * @param GCPtrBase The base address.
7677 * @param iSegReg The index of the segment register to use for
7678 * this access. The base and limits are checked.
7679 * @param GCPtrMem The address of the guest memory.
7680 */
7681VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7682{
7683 /*
7684     * The SIDT and SGDT instructions actually store the data using two
7685     * independent writes (see bs3CpuBasic2_sidt_sgdt_One).  The instructions
7686     * do not respond to operand size prefixes.
7687 */
7688 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7689 if (rcStrict == VINF_SUCCESS)
7690 {
7691 if (IEM_IS_16BIT_CODE(pVCpu))
7692 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7693 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7694 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7695 else if (IEM_IS_32BIT_CODE(pVCpu))
7696 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7697 else
7698 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7699 }
7700 return rcStrict;
7701}
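
/*
 * Illustrative example for iemMemStoreDataXdtr above: SGDT/SIDT always write
 * the 16-bit limit at GCPtrMem and the base at GCPtrMem + 2.  With
 * cbLimit=0x03ff and GCPtrBase=0x00345678, 16-bit code stores the dword
 * 0x00345678 (or 0xff345678 on a 286-or-older target, which forces the top
 * byte to 0xff), 32-bit code stores the same 32-bit base, and 64-bit code
 * stores the full 64-bit base.
 */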
7702
7703
7704/**
7705 * Begin a special stack push (used by interrupts, exceptions and such).
7706 *
7707 * This will raise \#SS or \#PF if appropriate.
7708 *
7709 * @returns Strict VBox status code.
7710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7711 * @param cbMem The number of bytes to push onto the stack.
7712 * @param cbAlign The alignment mask (7, 3, 1).
7713 * @param ppvMem Where to return the pointer to the stack memory.
7714 *                      As with the other memory functions this could be
7715 *                      direct access or bounce buffered access, so
7716 *                      don't commit register state until the commit call
7717 *                      succeeds.
7718 * @param puNewRsp Where to return the new RSP value. This must be
7719 * passed unchanged to
7720 * iemMemStackPushCommitSpecial().
7721 */
7722VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7723 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
7724{
7725 Assert(cbMem < UINT8_MAX);
7726 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
7727 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
7728 IEM_ACCESS_STACK_W, cbAlign);
7729}
7730
7731
7732/**
7733 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
7734 *
7735 * This will update the rSP.
7736 *
7737 * @returns Strict VBox status code.
7738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7739 * @param pvMem The pointer returned by
7740 * iemMemStackPushBeginSpecial().
7741 * @param uNewRsp The new RSP value returned by
7742 * iemMemStackPushBeginSpecial().
7743 */
7744VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
7745{
7746 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
7747 if (rcStrict == VINF_SUCCESS)
7748 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7749 return rcStrict;
7750}
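
/*
 * Minimal usage sketch for the special stack push pair above; the frame
 * layout and the uEip/uCs/fEfl locals are illustrative only, the calls and
 * the begin/commit ordering are taken from the functions above.
 *
 *      uint64_t      uNewRsp;
 *      uint32_t     *pau32Frame;
 *      VBOXSTRICTRC  rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t), 3 /*cbAlign*/,
 *                                                           (void **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pau32Frame[0] = uEip;
 *      pau32Frame[1] = uCs;
 *      pau32Frame[2] = fEfl;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);   // updates RSP on success
 */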
7751
7752
7753/**
7754 * Begin a special stack pop (used by iret, retf and such).
7755 *
7756 * This will raise \#SS or \#PF if appropriate.
7757 *
7758 * @returns Strict VBox status code.
7759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7760 * @param cbMem The number of bytes to pop from the stack.
7761 * @param cbAlign The alignment mask (7, 3, 1).
7762 * @param ppvMem Where to return the pointer to the stack memory.
7763 * @param puNewRsp Where to return the new RSP value. This must be
7764 * assigned to CPUMCTX::rsp manually some time
7765 * after iemMemStackPopDoneSpecial() has been
7766 * called.
7767 */
7768VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
7769 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
7770{
7771 Assert(cbMem < UINT8_MAX);
7772 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
7773 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
7774}
7775
7776
7777/**
7778 * Continue a special stack pop (used by iret and retf), for the purpose of
7779 * retrieving a new stack pointer.
7780 *
7781 * This will raise \#SS or \#PF if appropriate.
7782 *
7783 * @returns Strict VBox status code.
7784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7785 * @param off Offset from the top of the stack. This is zero
7786 * except in the retf case.
7787 * @param cbMem The number of bytes to pop from the stack.
7788 * @param ppvMem Where to return the pointer to the stack memory.
7789 * @param uCurNewRsp The current uncommitted RSP value. (No need to
7790 * return this because all use of this function is
7791 * to retrieve a new value and anything we return
7792 * here would be discarded.)
7793 */
7794VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
7795 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT
7796{
7797 Assert(cbMem < UINT8_MAX);
7798
7799    /* The essence of iemRegGetRspForPopEx and friends: */ /** @todo put this into an inlined function? */
7800 RTGCPTR GCPtrTop;
7801 if (IEM_IS_64BIT_CODE(pVCpu))
7802 GCPtrTop = uCurNewRsp;
7803 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7804 GCPtrTop = (uint32_t)uCurNewRsp;
7805 else
7806 GCPtrTop = (uint16_t)uCurNewRsp;
7807
7808 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop + off, IEM_ACCESS_STACK_R,
7809 0 /* checked in iemMemStackPopBeginSpecial */);
7810}
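
/*
 * Note on the stack-top masking above (illustrative value): with uCurNewRsp
 * equal to 0x0000000112345678, the offset used for the mapping is the full
 * value in 64-bit code, 0x12345678 with a 32-bit (big) SS and 0x5678 with a
 * 16-bit SS; the SS base and limit are then applied by iemMemMap.
 */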
7811
7812
7813/**
7814 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
7815 * iemMemStackPopContinueSpecial).
7816 *
7817 * The caller will manually commit the rSP.
7818 *
7819 * @returns Strict VBox status code.
7820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7821 * @param pvMem The pointer returned by
7822 * iemMemStackPopBeginSpecial() or
7823 * iemMemStackPopContinueSpecial().
7824 */
7825VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
7826{
7827 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
7828}
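
/*
 * Minimal usage sketch for the special stack pop helpers above, in the style
 * of a 32-bit return frame; the locals are illustrative, the call sequence
 * and the manual RSP commit follow the function documentation above.
 *
 *      uint64_t        uNewRsp;
 *      uint32_t const *pau32Frame;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t), 3 /*cbAlign*/,
 *                                                            (void const **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t const uEip = pau32Frame[0];
 *      uint32_t const uCs  = pau32Frame[1];
 *      uint32_t const fEfl = pau32Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *      // ... validate the frame, then assign uNewRsp to pVCpu->cpum.GstCtx.rsp ...
 */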
7829
7830
7831/**
7832 * Fetches a system table byte.
7833 *
7834 * @returns Strict VBox status code.
7835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7836 * @param pbDst Where to return the byte.
7837 * @param iSegReg The index of the segment register to use for
7838 * this access. The base and limits are checked.
7839 * @param GCPtrMem The address of the guest memory.
7840 */
7841VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7842{
7843 /* The lazy approach for now... */
7844 uint8_t const *pbSrc;
7845 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7846 if (rc == VINF_SUCCESS)
7847 {
7848 *pbDst = *pbSrc;
7849 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
7850 }
7851 return rc;
7852}
7853
7854
7855/**
7856 * Fetches a system table word.
7857 *
7858 * @returns Strict VBox status code.
7859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7860 * @param pu16Dst Where to return the word.
7861 * @param iSegReg The index of the segment register to use for
7862 * this access. The base and limits are checked.
7863 * @param GCPtrMem The address of the guest memory.
7864 */
7865VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7866{
7867 /* The lazy approach for now... */
7868 uint16_t const *pu16Src;
7869 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7870 if (rc == VINF_SUCCESS)
7871 {
7872 *pu16Dst = *pu16Src;
7873 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
7874 }
7875 return rc;
7876}
7877
7878
7879/**
7880 * Fetches a system table dword.
7881 *
7882 * @returns Strict VBox status code.
7883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7884 * @param pu32Dst Where to return the dword.
7885 * @param iSegReg The index of the segment register to use for
7886 * this access. The base and limits are checked.
7887 * @param GCPtrMem The address of the guest memory.
7888 */
7889VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7890{
7891 /* The lazy approach for now... */
7892 uint32_t const *pu32Src;
7893 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7894 if (rc == VINF_SUCCESS)
7895 {
7896 *pu32Dst = *pu32Src;
7897 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
7898 }
7899 return rc;
7900}
7901
7902
7903/**
7904 * Fetches a system table qword.
7905 *
7906 * @returns Strict VBox status code.
7907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7908 * @param pu64Dst Where to return the qword.
7909 * @param iSegReg The index of the segment register to use for
7910 * this access. The base and limits are checked.
7911 * @param GCPtrMem The address of the guest memory.
7912 */
7913VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7914{
7915 /* The lazy approach for now... */
7916 uint64_t const *pu64Src;
7917 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
7918 if (rc == VINF_SUCCESS)
7919 {
7920 *pu64Dst = *pu64Src;
7921 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
7922 }
7923 return rc;
7924}
7925
7926
7927/**
7928 * Fetches a descriptor table entry with caller specified error code.
7929 *
7930 * @returns Strict VBox status code.
7931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7932 * @param pDesc Where to return the descriptor table entry.
7933 * @param uSel The selector which table entry to fetch.
7934 * @param uXcpt The exception to raise on table lookup error.
7935 * @param uErrorCode The error code associated with the exception.
7936 */
7937static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
7938 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
7939{
7940 AssertPtr(pDesc);
7941 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
7942
7943 /** @todo did the 286 require all 8 bytes to be accessible? */
7944 /*
7945 * Get the selector table base and check bounds.
7946 */
7947 RTGCPTR GCPtrBase;
7948 if (uSel & X86_SEL_LDT)
7949 {
7950 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
7951 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
7952 {
7953 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
7954 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
7955 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7956 uErrorCode, 0);
7957 }
7958
7959 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
7960 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
7961 }
7962 else
7963 {
7964 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
7965 {
7966 LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
7967 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
7968 uErrorCode, 0);
7969 }
7970 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
7971 }
7972
7973 /*
7974 * Read the legacy descriptor and maybe the long mode extensions if
7975 * required.
7976 */
7977 VBOXSTRICTRC rcStrict;
7978 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
7979 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
7980 else
7981 {
7982 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
7983 if (rcStrict == VINF_SUCCESS)
7984 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
7985 if (rcStrict == VINF_SUCCESS)
7986 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
7987 if (rcStrict == VINF_SUCCESS)
7988 pDesc->Legacy.au16[3] = 0;
7989 else
7990 return rcStrict;
7991 }
7992
7993 if (rcStrict == VINF_SUCCESS)
7994 {
7995 if ( !IEM_IS_LONG_MODE(pVCpu)
7996 || pDesc->Legacy.Gen.u1DescType)
7997 pDesc->Long.au64[1] = 0;
7998 else if ( (uint32_t)(uSel | X86_SEL_RPL_LDT) + 8
7999 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8000 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8001 else
8002 {
8003            LogEx(LOG_GROUP_IEM, ("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8004 /** @todo is this the right exception? */
8005 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8006 }
8007 }
8008 return rcStrict;
8009}
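
/*
 * Worked example of the bounds checking above (illustrative selector): for
 * uSel=0x28 in the GDT the check ORs in the RPL/TI bits, so the last byte of
 * the entry, 0x28 | 7 = 0x2f, must not exceed gdtr.cbGdt.  With cbGdt=0x2f
 * the 8-byte descriptor is read at gdtr.pGdt + 0x28; with cbGdt=0x27 the
 * uXcpt exception is raised with uErrorCode.
 */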
8010
8011
8012/**
8013 * Fetches a descriptor table entry.
8014 *
8015 * @returns Strict VBox status code.
8016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8017 * @param pDesc Where to return the descriptor table entry.
8018 * @param uSel The selector which table entry to fetch.
8019 * @param uXcpt The exception to raise on table lookup error.
8020 */
8021VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8022{
8023 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8024}
8025
8026
8027/**
8028 * Marks the selector descriptor as accessed (only non-system descriptors).
8029 *
8030 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8031 * will therefore skip the limit checks.
8032 *
8033 * @returns Strict VBox status code.
8034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8035 * @param uSel The selector.
8036 */
8037VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8038{
8039 /*
8040 * Get the selector table base and calculate the entry address.
8041 */
8042 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8043 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8044 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8045 GCPtr += uSel & X86_SEL_MASK;
8046
8047 /*
8048     * ASMAtomicBitSet will assert if the address is misaligned, so do some
8049     * ugly stuff to avoid this.  This will make sure it's an atomic access
8050     * as well as more or less remove any question about 8-bit or 32-bit accesses.
8051 */
8052 VBOXSTRICTRC rcStrict;
8053 uint32_t volatile *pu32;
8054 if ((GCPtr & 3) == 0)
8055 {
8056 /* The normal case, map the 32-bit bits around the accessed bit (40). */
8057 GCPtr += 2 + 2;
8058 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8059 if (rcStrict != VINF_SUCCESS)
8060 return rcStrict;
8061        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
8062 }
8063 else
8064 {
8065 /* The misaligned GDT/LDT case, map the whole thing. */
8066 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8067 if (rcStrict != VINF_SUCCESS)
8068 return rcStrict;
8069 switch ((uintptr_t)pu32 & 3)
8070 {
8071 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8072 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8073 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8074 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8075 }
8076 }
8077
8078 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8079}
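
/*
 * Recap of the bit arithmetic above: the accessed flag is bit 40 of the
 * 8-byte descriptor (bit 0 of the type byte at offset 5).  In the aligned
 * case the mapping starts at offset 4, so the flag becomes bit 8 of the
 * mapped dword; in the misaligned case the switch re-bases bit 40 onto
 * whatever alignment the returned pointer happens to have.
 */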
8080
8081
8082#undef LOG_GROUP
8083#define LOG_GROUP LOG_GROUP_IEM
8084
8085/** @} */
8086
8087/** @name Opcode Helpers.
8088 * @{
8089 */
8090
8091/**
8092 * Calculates the effective address of a ModR/M memory operand.
8093 *
8094 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8095 *
8096 * @return Strict VBox status code.
8097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8098 * @param bRm The ModRM byte.
8099 * @param cbImmAndRspOffset - First byte: The size of any immediate
8100 * following the effective address opcode bytes
8101 * (only for RIP relative addressing).
8102 * - Second byte: RSP displacement (for POP [ESP]).
8103 * @param pGCPtrEff Where to return the effective address.
8104 */
8105VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8106{
8107 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8108# define SET_SS_DEF() \
8109 do \
8110 { \
8111 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8112 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8113 } while (0)
8114
8115 if (!IEM_IS_64BIT_CODE(pVCpu))
8116 {
8117/** @todo Check the effective address size crap! */
8118 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8119 {
8120 uint16_t u16EffAddr;
8121
8122 /* Handle the disp16 form with no registers first. */
8123 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8124 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8125 else
8126 {
8127                /* Get the displacement. */
8128 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8129 {
8130 case 0: u16EffAddr = 0; break;
8131 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8132 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8133 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8134 }
8135
8136 /* Add the base and index registers to the disp. */
8137 switch (bRm & X86_MODRM_RM_MASK)
8138 {
8139 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8140 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8141 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8142 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8143 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8144 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8145 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8146 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8147 }
8148 }
8149
8150 *pGCPtrEff = u16EffAddr;
8151 }
8152 else
8153 {
8154 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8155 uint32_t u32EffAddr;
8156
8157 /* Handle the disp32 form with no registers first. */
8158 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8159 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8160 else
8161 {
8162 /* Get the register (or SIB) value. */
8163 switch ((bRm & X86_MODRM_RM_MASK))
8164 {
8165 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8166 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8167 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8168 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8169 case 4: /* SIB */
8170 {
8171 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8172
8173 /* Get the index and scale it. */
8174 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8175 {
8176 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8177 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8178 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8179 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8180 case 4: u32EffAddr = 0; /*none */ break;
8181 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8182 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8183 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8184 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8185 }
8186 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8187
8188 /* add base */
8189 switch (bSib & X86_SIB_BASE_MASK)
8190 {
8191 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8192 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8193 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8194 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8195 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8196 case 5:
8197 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8198 {
8199 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8200 SET_SS_DEF();
8201 }
8202 else
8203 {
8204 uint32_t u32Disp;
8205 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8206 u32EffAddr += u32Disp;
8207 }
8208 break;
8209 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8210 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8211 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8212 }
8213 break;
8214 }
8215 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8216 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8217 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8218 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8219 }
8220
8221 /* Get and add the displacement. */
8222 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8223 {
8224 case 0:
8225 break;
8226 case 1:
8227 {
8228 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8229 u32EffAddr += i8Disp;
8230 break;
8231 }
8232 case 2:
8233 {
8234 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8235 u32EffAddr += u32Disp;
8236 break;
8237 }
8238 default:
8239 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8240 }
8241
8242 }
8243 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8244 *pGCPtrEff = u32EffAddr;
8245 else
8246 {
8247 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8248 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8249 }
8250 }
8251 }
8252 else
8253 {
8254 uint64_t u64EffAddr;
8255
8256 /* Handle the rip+disp32 form with no registers first. */
8257 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8258 {
8259 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8260 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8261 }
8262 else
8263 {
8264 /* Get the register (or SIB) value. */
8265 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8266 {
8267 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8268 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8269 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8270 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8271 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8272 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8273 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8274 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8275 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8276 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8277 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8278 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8279 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8280 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8281 /* SIB */
8282 case 4:
8283 case 12:
8284 {
8285 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8286
8287 /* Get the index and scale it. */
8288 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8289 {
8290 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8291 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8292 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8293 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8294 case 4: u64EffAddr = 0; /*none */ break;
8295 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8296 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8297 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8298 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8299 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8300 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8301 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8302 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8303 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8304 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8305 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8306 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8307 }
8308 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8309
8310 /* add base */
8311 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8312 {
8313 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8314 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8315 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8316 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8317 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8318 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8319 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8320 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8321 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8322 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8323 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8324 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8325 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8326 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8327 /* complicated encodings */
8328 case 5:
8329 case 13:
8330 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8331 {
8332 if (!pVCpu->iem.s.uRexB)
8333 {
8334 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8335 SET_SS_DEF();
8336 }
8337 else
8338 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8339 }
8340 else
8341 {
8342 uint32_t u32Disp;
8343 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8344 u64EffAddr += (int32_t)u32Disp;
8345 }
8346 break;
8347 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8348 }
8349 break;
8350 }
8351 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8352 }
8353
8354 /* Get and add the displacement. */
8355 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8356 {
8357 case 0:
8358 break;
8359 case 1:
8360 {
8361 int8_t i8Disp;
8362 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8363 u64EffAddr += i8Disp;
8364 break;
8365 }
8366 case 2:
8367 {
8368 uint32_t u32Disp;
8369 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8370 u64EffAddr += (int32_t)u32Disp;
8371 break;
8372 }
8373 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8374 }
8375
8376 }
8377
8378 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8379 *pGCPtrEff = u64EffAddr;
8380 else
8381 {
8382 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8383 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8384 }
8385 }
8386
8387 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8388 return VINF_SUCCESS;
8389}
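
/*
 * Worked decode example for the function above (illustrative instruction):
 * in 64-bit code "mov rax, [rbx+rcx*4+0x10]" encodes ModRM=0x44 (mod=1,
 * reg=0, rm=4 => SIB + disp8), SIB=0x8b (scale=2 => *4, index=1 => rcx,
 * base=3 => rbx) and disp8=0x10, so *pGCPtrEff ends up as
 * rbx + rcx*4 + 0x10.
 */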
8390
8391
8392#ifdef IEM_WITH_SETJMP
8393/**
8394 * Calculates the effective address of a ModR/M memory operand.
8395 *
8396 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8397 *
8398 * May longjmp on internal error.
8399 *
8400 * @return The effective address.
8401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8402 * @param bRm The ModRM byte.
8403 * @param cbImmAndRspOffset - First byte: The size of any immediate
8404 * following the effective address opcode bytes
8405 * (only for RIP relative addressing).
8406 * - Second byte: RSP displacement (for POP [ESP]).
8407 */
8408RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
8409{
8410 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
8411# define SET_SS_DEF() \
8412 do \
8413 { \
8414 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8415 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8416 } while (0)
8417
8418 if (!IEM_IS_64BIT_CODE(pVCpu))
8419 {
8420/** @todo Check the effective address size crap! */
8421 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8422 {
8423 uint16_t u16EffAddr;
8424
8425 /* Handle the disp16 form with no registers first. */
8426 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8427 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8428 else
8429 {
8430                /* Get the displacement. */
8431 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8432 {
8433 case 0: u16EffAddr = 0; break;
8434 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8435 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8436 default: AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1)); /* (caller checked for these) */
8437 }
8438
8439 /* Add the base and index registers to the disp. */
8440 switch (bRm & X86_MODRM_RM_MASK)
8441 {
8442 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8443 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8444 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8445 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8446 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8447 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8448 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8449 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8450 }
8451 }
8452
8453 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
8454 return u16EffAddr;
8455 }
8456
8457 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8458 uint32_t u32EffAddr;
8459
8460 /* Handle the disp32 form with no registers first. */
8461 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8462 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8463 else
8464 {
8465 /* Get the register (or SIB) value. */
8466 switch ((bRm & X86_MODRM_RM_MASK))
8467 {
8468 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8469 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8470 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8471 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8472 case 4: /* SIB */
8473 {
8474 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8475
8476 /* Get the index and scale it. */
8477 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8478 {
8479 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8480 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8481 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8482 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8483 case 4: u32EffAddr = 0; /*none */ break;
8484 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8485 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8486 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8487 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8488 }
8489 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8490
8491 /* add base */
8492 switch (bSib & X86_SIB_BASE_MASK)
8493 {
8494 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8495 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8496 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8497 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8498 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8499 case 5:
8500 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8501 {
8502 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8503 SET_SS_DEF();
8504 }
8505 else
8506 {
8507 uint32_t u32Disp;
8508 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8509 u32EffAddr += u32Disp;
8510 }
8511 break;
8512 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8513 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8514 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8515 }
8516 break;
8517 }
8518 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8519 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8520 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8521 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8522 }
8523
8524 /* Get and add the displacement. */
8525 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8526 {
8527 case 0:
8528 break;
8529 case 1:
8530 {
8531 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8532 u32EffAddr += i8Disp;
8533 break;
8534 }
8535 case 2:
8536 {
8537 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8538 u32EffAddr += u32Disp;
8539 break;
8540 }
8541 default:
8542 AssertFailedStmt(IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_2)); /* (caller checked for these) */
8543 }
8544 }
8545
8546 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8547 {
8548 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
8549 return u32EffAddr;
8550 }
8551 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8552 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
8553 return u32EffAddr & UINT16_MAX;
8554 }
8555
8556 uint64_t u64EffAddr;
8557
8558 /* Handle the rip+disp32 form with no registers first. */
8559 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8560 {
8561 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8562 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8563 }
8564 else
8565 {
8566 /* Get the register (or SIB) value. */
8567 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8568 {
8569 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8570 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8571 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8572 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8573 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8574 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8575 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8576 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8577 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8578 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8579 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8580 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8581 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8582 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8583 /* SIB */
8584 case 4:
8585 case 12:
8586 {
8587 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8588
8589 /* Get the index and scale it. */
8590 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8591 {
8592 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8593 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8594 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8595 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8596 case 4: u64EffAddr = 0; /*none */ break;
8597 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8598 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8599 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8600 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8601 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8602 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8603 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8604 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8605 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8606 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8607 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8608 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8609 }
8610 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8611
8612 /* add base */
8613 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8614 {
8615 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8616 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8617 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8618 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8619 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8620 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8621 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8622 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8623 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8624 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8625 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8626 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8627 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8628 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8629 /* complicated encodings */
8630 case 5:
8631 case 13:
8632 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8633 {
8634 if (!pVCpu->iem.s.uRexB)
8635 {
8636 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8637 SET_SS_DEF();
8638 }
8639 else
8640 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8641 }
8642 else
8643 {
8644 uint32_t u32Disp;
8645 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8646 u64EffAddr += (int32_t)u32Disp;
8647 }
8648 break;
8649 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8650 }
8651 break;
8652 }
8653 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
8654 }
8655
8656 /* Get and add the displacement. */
8657 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8658 {
8659 case 0:
8660 break;
8661 case 1:
8662 {
8663 int8_t i8Disp;
8664 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8665 u64EffAddr += i8Disp;
8666 break;
8667 }
8668 case 2:
8669 {
8670 uint32_t u32Disp;
8671 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8672 u64EffAddr += (int32_t)u32Disp;
8673 break;
8674 }
8675 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
8676 }
8677
8678 }
8679
8680 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8681 {
8682 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
8683 return u64EffAddr;
8684 }
8685 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8686 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
8687 return u64EffAddr & UINT32_MAX;
8688}
8689#endif /* IEM_WITH_SETJMP */
8690
8691
8692/**
8693 * Calculates the effective address of a ModR/M memory operand, extended version
8694 * for use in the recompilers.
8695 *
8696 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8697 *
8698 * @return Strict VBox status code.
8699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8700 * @param bRm The ModRM byte.
8701 * @param cbImmAndRspOffset - First byte: The size of any immediate
8702 * following the effective address opcode bytes
8703 * (only for RIP relative addressing).
8704 * - Second byte: RSP displacement (for POP [ESP]).
8705 * @param pGCPtrEff Where to return the effective address.
8706 * @param puInfo Extra info: 32-bit displacement (bits 31:0) and
8707 * SIB byte (bits 39:32).
8708 */
8709VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
8710{
8711    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8712# define SET_SS_DEF() \
8713 do \
8714 { \
8715 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8716 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8717 } while (0)
8718
8719 uint64_t uInfo;
8720 if (!IEM_IS_64BIT_CODE(pVCpu))
8721 {
8722/** @todo Check the effective address size crap! */
8723 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8724 {
8725 uint16_t u16EffAddr;
8726
8727 /* Handle the disp16 form with no registers first. */
8728 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8729 {
8730 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8731 uInfo = u16EffAddr;
8732 }
8733 else
8734 {
8735                /* Get the displacement. */
8736 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8737 {
8738 case 0: u16EffAddr = 0; break;
8739 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8740 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8741 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8742 }
8743 uInfo = u16EffAddr;
8744
8745 /* Add the base and index registers to the disp. */
8746 switch (bRm & X86_MODRM_RM_MASK)
8747 {
8748 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8749 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8750 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8751 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8752 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8753 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8754 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8755 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8756 }
8757 }
8758
8759 *pGCPtrEff = u16EffAddr;
8760 }
8761 else
8762 {
8763 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8764 uint32_t u32EffAddr;
8765
8766 /* Handle the disp32 form with no registers first. */
8767 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8768 {
8769 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8770 uInfo = u32EffAddr;
8771 }
8772 else
8773 {
8774 /* Get the register (or SIB) value. */
8775 uInfo = 0;
8776 switch ((bRm & X86_MODRM_RM_MASK))
8777 {
8778 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8779 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8780 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8781 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8782 case 4: /* SIB */
8783 {
8784 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8785 uInfo = (uint64_t)bSib << 32;
8786
8787 /* Get the index and scale it. */
8788 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8789 {
8790 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8791 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8792 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8793 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8794 case 4: u32EffAddr = 0; /*none */ break;
8795 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8796 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8797 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8798 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8799 }
8800 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8801
8802 /* add base */
8803 switch (bSib & X86_SIB_BASE_MASK)
8804 {
8805 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8806 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8807 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8808 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8809 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8810 case 5:
8811 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8812 {
8813 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8814 SET_SS_DEF();
8815 }
8816 else
8817 {
8818 uint32_t u32Disp;
8819 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8820 u32EffAddr += u32Disp;
8821 uInfo |= u32Disp;
8822 }
8823 break;
8824 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8825 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8826 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8827 }
8828 break;
8829 }
8830 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8831 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8832 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8833 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8834 }
8835
8836 /* Get and add the displacement. */
8837 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8838 {
8839 case 0:
8840 break;
8841 case 1:
8842 {
8843 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8844 u32EffAddr += i8Disp;
8845 uInfo |= (uint32_t)(int32_t)i8Disp;
8846 break;
8847 }
8848 case 2:
8849 {
8850 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8851 u32EffAddr += u32Disp;
8852 uInfo |= (uint32_t)u32Disp;
8853 break;
8854 }
8855 default:
8856 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8857 }
8858
8859 }
8860 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8861 *pGCPtrEff = u32EffAddr;
8862 else
8863 {
8864 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8865 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8866 }
8867 }
8868 }
8869 else
8870 {
8871 uint64_t u64EffAddr;
8872
8873 /* Handle the rip+disp32 form with no registers first. */
8874 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8875 {
8876 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8877 uInfo = (uint32_t)u64EffAddr;
8878 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
8879 }
8880 else
8881 {
8882 /* Get the register (or SIB) value. */
8883 uInfo = 0;
8884 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8885 {
8886 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8887 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8888 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8889 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8890 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8891 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8892 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8893 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8894 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8895 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8896 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8897 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8898 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8899 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8900 /* SIB */
8901 case 4:
8902 case 12:
8903 {
8904 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8905 uInfo = (uint64_t)bSib << 32;
8906
8907 /* Get the index and scale it. */
8908 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8909 {
8910 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8911 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8912 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8913 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8914 case 4: u64EffAddr = 0; /*none */ break;
8915 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8916 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8917 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8918 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8919 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8920 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8921 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8922 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8923 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8924 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8925 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8926 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8927 }
8928 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8929
8930 /* add base */
8931 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8932 {
8933 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8934 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8935 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8936 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8937 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
8938 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8939 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8940 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8941 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8942 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8943 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8944 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8945 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8946 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8947 /* complicated encodings */
8948 case 5:
8949 case 13:
8950 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8951 {
8952 if (!pVCpu->iem.s.uRexB)
8953 {
8954 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8955 SET_SS_DEF();
8956 }
8957 else
8958 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8959 }
8960 else
8961 {
8962 uint32_t u32Disp;
8963 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8964 u64EffAddr += (int32_t)u32Disp;
8965 uInfo |= u32Disp;
8966 }
8967 break;
8968 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8969 }
8970 break;
8971 }
8972 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8973 }
8974
8975 /* Get and add the displacement. */
8976 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8977 {
8978 case 0:
8979 break;
8980 case 1:
8981 {
8982 int8_t i8Disp;
8983 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8984 u64EffAddr += i8Disp;
8985 uInfo |= (uint32_t)(int32_t)i8Disp;
8986 break;
8987 }
8988 case 2:
8989 {
8990 uint32_t u32Disp;
8991 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8992 u64EffAddr += (int32_t)u32Disp;
8993 uInfo |= u32Disp;
8994 break;
8995 }
8996 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8997 }
8998
8999 }
9000
9001 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9002 *pGCPtrEff = u64EffAddr;
9003 else
9004 {
9005 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9006 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9007 }
9008 }
9009 *puInfo = uInfo;
9010
9011 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
9012 return VINF_SUCCESS;
9013}
9014
9015/** @} */
9016
9017
9018#ifdef LOG_ENABLED
9019/**
9020 * Logs the current instruction.
9021 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9022 * @param fSameCtx Set if we have the same context information as the VMM,
9023 * clear if we may have already executed an instruction in
9024 * our debug context. When clear, we assume IEMCPU holds
9025 * valid CPU mode info.
9026 *
9027 * The @a fSameCtx parameter is now misleading and obsolete.
9028 * @param pszFunction The IEM function doing the execution.
9029 */
9030static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9031{
9032# ifdef IN_RING3
9033 if (LogIs2Enabled())
9034 {
9035 char szInstr[256];
9036 uint32_t cbInstr = 0;
9037 if (fSameCtx)
9038 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9039 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9040 szInstr, sizeof(szInstr), &cbInstr);
9041 else
9042 {
9043 uint32_t fFlags = 0;
9044 switch (IEM_GET_CPU_MODE(pVCpu))
9045 {
9046 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9047 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9048 case IEMMODE_16BIT:
9049 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9050 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9051 else
9052 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9053 break;
9054 }
9055 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9056 szInstr, sizeof(szInstr), &cbInstr);
9057 }
9058
9059 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9060 Log2(("**** %s fExec=%x\n"
9061 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9062 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9063 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9064 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9065 " %s\n"
9066 , pszFunction, pVCpu->iem.s.fExec,
9067 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9068 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9069 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9070 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9071 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9072 szInstr));
9073
9074 if (LogIs3Enabled())
9075 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9076 }
9077 else
9078# endif
9079 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9080 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9081 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9082}
9083#endif /* LOG_ENABLED */
9084
9085
9086#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9087/**
9088 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9089 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9090 *
9091 * @returns Modified rcStrict.
9092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9093 * @param rcStrict The instruction execution status.
9094 */
9095static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9096{
9097 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
9098 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9099 {
9100 /* VMX preemption timer takes priority over NMI-window exits. */
9101 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9102 {
9103 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9104 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9105 }
9106 /*
9107 * Check remaining intercepts.
9108 *
9109 * NMI-window and Interrupt-window VM-exits.
9110 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9111 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9112 *
9113 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9114 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9115 */
9116 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9117 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9118 && !TRPMHasTrap(pVCpu))
9119 {
9120 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9121 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9122 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9123 {
9124 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9125 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9126 }
9127 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9128 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9129 {
9130 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9131 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9132 }
9133 }
9134 }
9135 /* TPR-below threshold/APIC write has the highest priority. */
9136 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9137 {
9138 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9139 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9140 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9141 }
9142 /* MTF takes priority over VMX-preemption timer. */
9143 else
9144 {
9145 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9146 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
9147 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9148 }
9149 return rcStrict;
9150}
9151#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9152
9153
9154/**
9155 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9156 * IEMExecOneWithPrefetchedByPC.
9157 *
9158 * Similar code is found in IEMExecLots.
9159 *
9160 * @return Strict VBox status code.
9161 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9162 * @param fExecuteInhibit If set, execute the instruction following CLI,
9163 * POP SS and MOV SS,GR.
9164 * @param pszFunction The calling function name.
9165 */
9166DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9167{
9168 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9169 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9170 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9171 RT_NOREF_PV(pszFunction);
9172
9173#ifdef IEM_WITH_SETJMP
9174 VBOXSTRICTRC rcStrict;
9175 IEM_TRY_SETJMP(pVCpu, rcStrict)
9176 {
9177 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9178 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9179 }
9180 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9181 {
9182 pVCpu->iem.s.cLongJumps++;
9183 }
9184 IEM_CATCH_LONGJMP_END(pVCpu);
9185#else
9186 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9187 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9188#endif
9189 if (rcStrict == VINF_SUCCESS)
9190 pVCpu->iem.s.cInstructions++;
9191 if (pVCpu->iem.s.cActiveMappings > 0)
9192 {
9193 Assert(rcStrict != VINF_SUCCESS);
9194 iemMemRollback(pVCpu);
9195 }
9196 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9197 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9198 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9199
9200//#ifdef DEBUG
9201// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9202//#endif
9203
9204#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9205 /*
9206 * Perform any VMX nested-guest instruction boundary actions.
9207 *
9208 * If any of these causes a VM-exit, we must skip executing the next
9209 * instruction (would run into stale page tables). A VM-exit makes sure
9210 * there is no interrupt-inhibition, so that should ensure we don't go on
9211 * to try to execute the next instruction. Clearing fExecuteInhibit is
9212 * problematic because of the setjmp/longjmp clobbering above.
9213 */
9214 if ( !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9215 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
9216 || rcStrict != VINF_SUCCESS)
9217 { /* likely */ }
9218 else
9219 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9220#endif
9221
9222 /* Execute the next instruction as well if a cli, pop ss or
9223 mov ss, Gr has just completed successfully. */
9224 if ( fExecuteInhibit
9225 && rcStrict == VINF_SUCCESS
9226 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
9227 {
9228 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
9229 if (rcStrict == VINF_SUCCESS)
9230 {
9231#ifdef LOG_ENABLED
9232 iemLogCurInstr(pVCpu, false, pszFunction);
9233#endif
9234#ifdef IEM_WITH_SETJMP
9235 IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
9236 {
9237 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9238 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9239 }
9240 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9241 {
9242 pVCpu->iem.s.cLongJumps++;
9243 }
9244 IEM_CATCH_LONGJMP_END(pVCpu);
9245#else
9246 IEM_OPCODE_GET_FIRST_U8(&b);
9247 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9248#endif
9249 if (rcStrict == VINF_SUCCESS)
9250 {
9251 pVCpu->iem.s.cInstructions++;
9252#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9253 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9254 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9255 { /* likely */ }
9256 else
9257 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9258#endif
9259 }
9260 if (pVCpu->iem.s.cActiveMappings > 0)
9261 {
9262 Assert(rcStrict != VINF_SUCCESS);
9263 iemMemRollback(pVCpu);
9264 }
9265 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9266 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9267 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9268 }
9269 else if (pVCpu->iem.s.cActiveMappings > 0)
9270 iemMemRollback(pVCpu);
9271 /** @todo drop this after we bake this change into RIP advancing. */
9272 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
9273 }
9274
9275 /*
9276 * Return value fiddling, statistics and sanity assertions.
9277 */
9278 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9279
9280 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9281 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9282 return rcStrict;
9283}
9284
9285
9286/**
9287 * Execute one instruction.
9288 *
9289 * @return Strict VBox status code.
9290 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9291 */
9292VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9293{
9294 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9295#ifdef LOG_ENABLED
9296 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9297#endif
9298
9299 /*
9300 * Do the decoding and emulation.
9301 */
9302 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9303 if (rcStrict == VINF_SUCCESS)
9304 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9305 else if (pVCpu->iem.s.cActiveMappings > 0)
9306 iemMemRollback(pVCpu);
9307
9308 if (rcStrict != VINF_SUCCESS)
9309 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9310 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9311 return rcStrict;
9312}
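
/*
 * Illustrative usage sketch (not built): a minimal example of how a caller on
 * the EMT might drive IEMExecOne to emulate a single guest instruction.  The
 * wrapper name and the logging below are assumptions for illustration only,
 * not part of the IEM API.
 */
#if 0
static VBOXSTRICTRC iemExampleEmulateOneInstruction(PVMCPUCC pVCpu)
{
    /* Emulate exactly one guest instruction at the current CS:RIP. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);

    /* Informational and error statuses alike must be propagated to the caller. */
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("iemExampleEmulateOneInstruction: rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif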
9313
9314
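/**
 * Executes one instruction, like IEMExecOne, and additionally reports the
 * number of guest memory bytes written by it.
 *
 * @returns Strict VBox status code.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param pcbWritten Where to return the number of guest memory bytes written
 * by the instruction. Optional, can be NULL.
 */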
9315VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9316{
9317 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9318 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9319 if (rcStrict == VINF_SUCCESS)
9320 {
9321 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9322 if (pcbWritten)
9323 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9324 }
9325 else if (pVCpu->iem.s.cActiveMappings > 0)
9326 iemMemRollback(pVCpu);
9327
9328 return rcStrict;
9329}
9330
9331
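/**
 * Executes one instruction, using the caller supplied pre-fetched opcode bytes
 * when the current RIP matches @a OpcodeBytesPC (otherwise the opcode bytes
 * are fetched the normal way).
 *
 * @returns Strict VBox status code.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param OpcodeBytesPC The guest RIP the opcode bytes were fetched from.
 * @param pvOpcodeBytes The pre-fetched opcode bytes.
 * @param cbOpcodeBytes Number of valid bytes at @a pvOpcodeBytes.
 */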
9332VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9333 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9334{
9335 VBOXSTRICTRC rcStrict;
9336 if ( cbOpcodeBytes
9337 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9338 {
9339 iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
9340#ifdef IEM_WITH_CODE_TLB
9341 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9342 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9343 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9344 pVCpu->iem.s.offCurInstrStart = 0;
9345 pVCpu->iem.s.offInstrNextByte = 0;
9346 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9347#else
9348 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9349 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9350#endif
9351 rcStrict = VINF_SUCCESS;
9352 }
9353 else
9354 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9355 if (rcStrict == VINF_SUCCESS)
9356 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9357 else if (pVCpu->iem.s.cActiveMappings > 0)
9358 iemMemRollback(pVCpu);
9359
9360 return rcStrict;
9361}
9362
9363
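/**
 * Executes one instruction with the memory access handlers bypassed
 * (IEM_F_BYPASS_HANDLERS) and without executing a follow-up instruction that
 * sits in an interrupt shadow, reporting the number of guest memory bytes
 * written.
 *
 * @returns Strict VBox status code.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param pcbWritten Where to return the number of guest memory bytes written
 * by the instruction. Optional, can be NULL.
 */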
9364VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, uint32_t *pcbWritten)
9365{
9366 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9367 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9368 if (rcStrict == VINF_SUCCESS)
9369 {
9370 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9371 if (pcbWritten)
9372 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9373 }
9374 else if (pVCpu->iem.s.cActiveMappings > 0)
9375 iemMemRollback(pVCpu);
9376
9377 return rcStrict;
9378}
9379
9380
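/**
 * Combination of IEMExecOneBypassEx and IEMExecOneWithPrefetchedByPC: executes
 * one instruction with the memory access handlers bypassed, using the supplied
 * pre-fetched opcode bytes when the current RIP matches @a OpcodeBytesPC.
 *
 * @returns Strict VBox status code.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param OpcodeBytesPC The guest RIP the opcode bytes were fetched from.
 * @param pvOpcodeBytes The pre-fetched opcode bytes.
 * @param cbOpcodeBytes Number of valid bytes at @a pvOpcodeBytes.
 */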
9381VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
9382 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9383{
9384 VBOXSTRICTRC rcStrict;
9385 if ( cbOpcodeBytes
9386 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9387 {
9388 iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
9389#ifdef IEM_WITH_CODE_TLB
9390 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9391 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9392 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9393 pVCpu->iem.s.offCurInstrStart = 0;
9394 pVCpu->iem.s.offInstrNextByte = 0;
9395 pVCpu->iem.s.GCPhysInstrBuf = NIL_RTGCPHYS;
9396#else
9397 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9398 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9399#endif
9400 rcStrict = VINF_SUCCESS;
9401 }
9402 else
9403 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
9404 if (rcStrict == VINF_SUCCESS)
9405 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9406 else if (pVCpu->iem.s.cActiveMappings > 0)
9407 iemMemRollback(pVCpu);
9408
9409 return rcStrict;
9410}
9411
9412
9413/**
9414 * For handling split cacheline lock operations when the host has split-lock
9415 * detection enabled.
9416 *
9417 * This will cause the interpreter to disregard the lock prefix and implicit
9418 * locking (xchg).
9419 *
9420 * @returns Strict VBox status code.
9421 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9422 */
9423VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9424{
9425 /*
9426 * Do the decoding and emulation.
9427 */
9428 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
9429 if (rcStrict == VINF_SUCCESS)
9430 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9431 else if (pVCpu->iem.s.cActiveMappings > 0)
9432 iemMemRollback(pVCpu);
9433
9434 if (rcStrict != VINF_SUCCESS)
9435 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9436 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9437 return rcStrict;
9438}
9439
9440
9441/**
9442 * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
9443 * inject a pending TRPM trap.
9444 */
9445VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
9446{
9447 Assert(TRPMHasTrap(pVCpu));
9448
9449 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
9450 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
9451 {
9452 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9453#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9454 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9455 if (fIntrEnabled)
9456 {
9457 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9458 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9459 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9460 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9461 else
9462 {
9463 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9464 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9465 }
9466 }
9467#else
9468 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9469#endif
9470 if (fIntrEnabled)
9471 {
9472 uint8_t u8TrapNo;
9473 TRPMEVENT enmType;
9474 uint32_t uErrCode;
9475 RTGCPTR uCr2;
9476 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
9477 AssertRC(rc2);
9478 Assert(enmType == TRPM_HARDWARE_INT);
9479 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
9480
9481 TRPMResetTrap(pVCpu);
9482
9483#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9484 /* Injecting an event may cause a VM-exit. */
9485 if ( rcStrict != VINF_SUCCESS
9486 && rcStrict != VINF_IEM_RAISED_XCPT)
9487 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9488#else
9489 NOREF(rcStrict);
9490#endif
9491 }
9492 }
9493
9494 return VINF_SUCCESS;
9495}
9496
9497
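/**
 * Executes multiple instructions until a limit is reached or something else
 * requires attention.
 *
 * Injects a pending TRPM trap first if possible, then runs the decode and
 * execute loop until an instruction returns a status other than VINF_SUCCESS,
 * a force flag needs servicing, @a cMaxInstructions have been executed, or the
 * periodic timer poll asks us to return.
 *
 * @returns Strict VBox status code.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param cMaxInstructions Maximum number of instructions to execute.
 * @param cPollRate Timer polling interval mask; @a cPollRate + 1 must be a
 * power of two (timers are polled roughly every @a cPollRate + 1 instructions).
 * @param pcInstructions Where to return the number of instructions executed.
 * Optional, can be NULL.
 */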
9498VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9499{
9500 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9501 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9502 Assert(cMaxInstructions > 0);
9503
9504 /*
9505 * See if there is an interrupt pending in TRPM, inject it if we can.
9506 */
9507 /** @todo What if we are injecting an exception and not an interrupt? Is that
9508 * possible here? For now we assert it is indeed only an interrupt. */
9509 if (!TRPMHasTrap(pVCpu))
9510 { /* likely */ }
9511 else
9512 {
9513 VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
9514 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9515 { /*likely */ }
9516 else
9517 return rcStrict;
9518 }
9519
9520 /*
9521 * Initial decoder init w/ prefetch, then setup setjmp.
9522 */
9523 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9524 if (rcStrict == VINF_SUCCESS)
9525 {
9526#ifdef IEM_WITH_SETJMP
9527 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
9528 IEM_TRY_SETJMP(pVCpu, rcStrict)
9529#endif
9530 {
9531 /*
9532 * The run loop. We limit ourselves to the caller specified instruction count (cMaxInstructions).
9533 */
9534 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9535 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9536 for (;;)
9537 {
9538 /*
9539 * Log the state.
9540 */
9541#ifdef LOG_ENABLED
9542 iemLogCurInstr(pVCpu, true, "IEMExecLots");
9543#endif
9544
9545 /*
9546 * Do the decoding and emulation.
9547 */
9548 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9549 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9550#ifdef VBOX_STRICT
9551 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
9552#endif
9553 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9554 {
9555 Assert(pVCpu->iem.s.cActiveMappings == 0);
9556 pVCpu->iem.s.cInstructions++;
9557
9558#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9559 /* Perform any VMX nested-guest instruction boundary actions. */
9560 uint64_t fCpu = pVCpu->fLocalForcedActions;
9561 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9562 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9563 { /* likely */ }
9564 else
9565 {
9566 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9567 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9568 fCpu = pVCpu->fLocalForcedActions;
9569 else
9570 {
9571 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9572 break;
9573 }
9574 }
9575#endif
9576 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9577 {
9578#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9579 uint64_t fCpu = pVCpu->fLocalForcedActions;
9580#endif
9581 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9582 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9583 | VMCPU_FF_TLB_FLUSH
9584 | VMCPU_FF_UNHALT );
9585
9586 if (RT_LIKELY( ( !fCpu
9587 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9588 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
9589 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
9590 {
9591 if (--cMaxInstructionsGccStupidity > 0)
9592 {
9593 /* Poll timers every now and then according to the caller's specs. */
9594 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
9595 || !TMTimerPollBool(pVM, pVCpu))
9596 {
9597 Assert(pVCpu->iem.s.cActiveMappings == 0);
9598 iemReInitDecoder(pVCpu);
9599 continue;
9600 }
9601 }
9602 }
9603 }
9604 Assert(pVCpu->iem.s.cActiveMappings == 0);
9605 }
9606 else if (pVCpu->iem.s.cActiveMappings > 0)
9607 iemMemRollback(pVCpu);
9608 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9609 break;
9610 }
9611 }
9612#ifdef IEM_WITH_SETJMP
9613 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9614 {
9615 if (pVCpu->iem.s.cActiveMappings > 0)
9616 iemMemRollback(pVCpu);
9617# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9618 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9619# endif
9620 pVCpu->iem.s.cLongJumps++;
9621 }
9622 IEM_CATCH_LONGJMP_END(pVCpu);
9623#endif
9624
9625 /*
9626 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9627 */
9628 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9629 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9630 }
9631 else
9632 {
9633 if (pVCpu->iem.s.cActiveMappings > 0)
9634 iemMemRollback(pVCpu);
9635
9636#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9637 /*
9638 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9639 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9640 */
9641 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9642#endif
9643 }
9644
9645 /*
9646 * Maybe re-enter raw-mode and log.
9647 */
9648 if (rcStrict != VINF_SUCCESS)
9649 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9650 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9651 if (pcInstructions)
9652 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
9653 return rcStrict;
9654}
9655
9656
9657/**
9658 * Interface used by EMExecuteExec, does exit statistics and limits.
9659 *
9660 * @returns Strict VBox status code.
9661 * @param pVCpu The cross context virtual CPU structure.
9662 * @param fWillExit To be defined.
9663 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
9664 * @param cMaxInstructions Maximum number of instructions to execute.
9665 * @param cMaxInstructionsWithoutExits
9666 * The max number of instructions without exits.
9667 * @param pStats Where to return statistics.
9668 */
9669VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
9670 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
9671{
9672 NOREF(fWillExit); /** @todo define flexible exit crits */
9673
9674 /*
9675 * Initialize return stats.
9676 */
9677 pStats->cInstructions = 0;
9678 pStats->cExits = 0;
9679 pStats->cMaxExitDistance = 0;
9680 pStats->cReserved = 0;
9681
9682 /*
9683 * Initial decoder init w/ prefetch, then setup setjmp.
9684 */
9685 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
9686 if (rcStrict == VINF_SUCCESS)
9687 {
9688#ifdef IEM_WITH_SETJMP
9689 pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf?!? */
9690 IEM_TRY_SETJMP(pVCpu, rcStrict)
9691#endif
9692 {
9693#ifdef IN_RING0
9694 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
9695#endif
9696 uint32_t cInstructionSinceLastExit = 0;
9697
9698 /*
9699 * The run loop. We limit ourselves to 4096 instructions right now.
9700 */
9701 PVM pVM = pVCpu->CTX_SUFF(pVM);
9702 for (;;)
9703 {
9704 /*
9705 * Log the state.
9706 */
9707#ifdef LOG_ENABLED
9708 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
9709#endif
9710
9711 /*
9712 * Do the decoding and emulation.
9713 */
9714 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
9715
9716 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
9717 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
9718
9719 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
9720 && cInstructionSinceLastExit > 0 /* don't count the first */ )
9721 {
9722 pStats->cExits += 1;
9723 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
9724 pStats->cMaxExitDistance = cInstructionSinceLastExit;
9725 cInstructionSinceLastExit = 0;
9726 }
9727
9728 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9729 {
9730 Assert(pVCpu->iem.s.cActiveMappings == 0);
9731 pVCpu->iem.s.cInstructions++;
9732 pStats->cInstructions++;
9733 cInstructionSinceLastExit++;
9734
9735#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9736 /* Perform any VMX nested-guest instruction boundary actions. */
9737 uint64_t fCpu = pVCpu->fLocalForcedActions;
9738 if (!(fCpu & ( VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9739 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
9740 { /* likely */ }
9741 else
9742 {
9743 rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
9744 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9745 fCpu = pVCpu->fLocalForcedActions;
9746 else
9747 {
9748 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9749 break;
9750 }
9751 }
9752#endif
9753 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
9754 {
9755#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
9756 uint64_t fCpu = pVCpu->fLocalForcedActions;
9757#endif
9758 fCpu &= VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
9759 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
9760 | VMCPU_FF_TLB_FLUSH
9761 | VMCPU_FF_UNHALT );
9762 if (RT_LIKELY( ( ( !fCpu
9763 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
9764 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
9765 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
9766 || pStats->cInstructions < cMinInstructions))
9767 {
9768 if (pStats->cInstructions < cMaxInstructions)
9769 {
9770 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
9771 {
9772#ifdef IN_RING0
9773 if ( !fCheckPreemptionPending
9774 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
9775#endif
9776 {
9777 Assert(pVCpu->iem.s.cActiveMappings == 0);
9778 iemReInitDecoder(pVCpu);
9779 continue;
9780 }
9781#ifdef IN_RING0
9782 rcStrict = VINF_EM_RAW_INTERRUPT;
9783 break;
9784#endif
9785 }
9786 }
9787 }
9788 Assert(!(fCpu & VMCPU_FF_IEM));
9789 }
9790 Assert(pVCpu->iem.s.cActiveMappings == 0);
9791 }
9792 else if (pVCpu->iem.s.cActiveMappings > 0)
9793 iemMemRollback(pVCpu);
9794 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9795 break;
9796 }
9797 }
9798#ifdef IEM_WITH_SETJMP
9799 IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
9800 {
9801 if (pVCpu->iem.s.cActiveMappings > 0)
9802 iemMemRollback(pVCpu);
9803 pVCpu->iem.s.cLongJumps++;
9804 }
9805 IEM_CATCH_LONGJMP_END(pVCpu);
9806#endif
9807
9808 /*
9809 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
9810 */
9811 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9812 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9813 }
9814 else
9815 {
9816 if (pVCpu->iem.s.cActiveMappings > 0)
9817 iemMemRollback(pVCpu);
9818
9819#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9820 /*
9821 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
9822 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
9823 */
9824 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9825#endif
9826 }
9827
9828 /*
9829 * Maybe re-enter raw-mode and log.
9830 */
9831 if (rcStrict != VINF_SUCCESS)
9832 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
9833 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
9834 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
9835 return rcStrict;
9836}
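
/*
 * Illustrative usage sketch (not built): how a caller such as EM might invoke
 * IEMExecForExits and log the returned statistics.  The wrapper name, the
 * instruction limits, and the assumption that IEMEXECFOREXITSTATS is the
 * structure type behind PIEMEXECFOREXITSTATS are for illustration only.
 */
#if 0
static VBOXSTRICTRC emExampleExecuteForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 64 /*cMinInstructions*/,
                                            4096 /*cMaxInstructions*/, 512 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    Log(("emExampleExecuteForExits: %Rrc ins=%u exits=%u maxdist=%u\n",
         VBOXSTRICTRC_VAL(rcStrict), Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}
#endif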
9837
9838
9839/**
9840 * Injects a trap, fault, abort, software interrupt or external interrupt.
9841 *
9842 * The parameter list matches TRPMQueryTrapAll pretty closely.
9843 *
9844 * @returns Strict VBox status code.
9845 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9846 * @param u8TrapNo The trap number.
9847 * @param enmType What type is it (trap/fault/abort), software
9848 * interrupt or hardware interrupt.
9849 * @param uErrCode The error code if applicable.
9850 * @param uCr2 The CR2 value if applicable.
9851 * @param cbInstr The instruction length (only relevant for
9852 * software interrupts).
9853 */
9854VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
9855 uint8_t cbInstr)
9856{
9857 iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
9858#ifdef DBGFTRACE_ENABLED
9859 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
9860 u8TrapNo, enmType, uErrCode, uCr2);
9861#endif
9862
9863 uint32_t fFlags;
9864 switch (enmType)
9865 {
9866 case TRPM_HARDWARE_INT:
9867 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
9868 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
9869 uErrCode = uCr2 = 0;
9870 break;
9871
9872 case TRPM_SOFTWARE_INT:
9873 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
9874 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
9875 uErrCode = uCr2 = 0;
9876 break;
9877
9878 case TRPM_TRAP:
9879 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
9880 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
9881 if (u8TrapNo == X86_XCPT_PF)
9882 fFlags |= IEM_XCPT_FLAGS_CR2;
9883 switch (u8TrapNo)
9884 {
9885 case X86_XCPT_DF:
9886 case X86_XCPT_TS:
9887 case X86_XCPT_NP:
9888 case X86_XCPT_SS:
9889 case X86_XCPT_PF:
9890 case X86_XCPT_AC:
9891 case X86_XCPT_GP:
9892 fFlags |= IEM_XCPT_FLAGS_ERR;
9893 break;
9894 }
9895 break;
9896
9897 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9898 }
9899
9900 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
9901
9902 if (pVCpu->iem.s.cActiveMappings > 0)
9903 iemMemRollback(pVCpu);
9904
9905 return rcStrict;
9906}
9907
9908
9909/**
9910 * Injects the active TRPM event.
9911 *
9912 * @returns Strict VBox status code.
9913 * @param pVCpu The cross context virtual CPU structure.
9914 */
9915VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
9916{
9917#ifndef IEM_IMPLEMENTS_TASKSWITCH
9918 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
9919#else
9920 uint8_t u8TrapNo;
9921 TRPMEVENT enmType;
9922 uint32_t uErrCode;
9923 RTGCUINTPTR uCr2;
9924 uint8_t cbInstr;
9925 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
9926 if (RT_FAILURE(rc))
9927 return rc;
9928
9929 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
9930 * ICEBP \#DB injection as a special case. */
9931 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
9932#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
9933 if (rcStrict == VINF_SVM_VMEXIT)
9934 rcStrict = VINF_SUCCESS;
9935#endif
9936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9937 if (rcStrict == VINF_VMX_VMEXIT)
9938 rcStrict = VINF_SUCCESS;
9939#endif
9940 /** @todo Are there any other codes that imply the event was successfully
9941 * delivered to the guest? See @bugref{6607}. */
9942 if ( rcStrict == VINF_SUCCESS
9943 || rcStrict == VINF_IEM_RAISED_XCPT)
9944 TRPMResetTrap(pVCpu);
9945
9946 return rcStrict;
9947#endif
9948}
9949
9950
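/**
 * Sets an IEM managed breakpoint. Not implemented.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param pVM The cross context VM structure.
 * @param GCPtrBp The guest address of the breakpoint.
 */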
9951VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
9952{
9953 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9954 return VERR_NOT_IMPLEMENTED;
9955}
9956
9957
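/**
 * Clears an IEM managed breakpoint. Not implemented.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param pVM The cross context VM structure.
 * @param GCPtrBp The guest address of the breakpoint.
 */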
9958VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
9959{
9960 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
9961 return VERR_NOT_IMPLEMENTED;
9962}
9963
9964
9965/**
9966 * Interface for HM and EM for executing string I/O OUT (write) instructions.
9967 *
9968 * This API ASSUMES that the caller has already verified that the guest code is
9969 * allowed to access the I/O port. (The I/O port is in the DX register in the
9970 * guest state.)
9971 *
9972 * @returns Strict VBox status code.
9973 * @param pVCpu The cross context virtual CPU structure.
9974 * @param cbValue The size of the I/O port access (1, 2, or 4).
9975 * @param enmAddrMode The addressing mode.
9976 * @param fRepPrefix Indicates whether a repeat prefix is used
9977 * (doesn't matter which for this instruction).
9978 * @param cbInstr The instruction length in bytes.
9979 * @param iEffSeg The effective segment register number.
9980 * @param fIoChecked Whether the access to the I/O port has been
9981 * checked or not. It's typically checked in the
9982 * HM scenario.
9983 */
9984VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
9985 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
9986{
9987 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
9988 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
9989
9990 /*
9991 * State init.
9992 */
9993 iemInitExec(pVCpu, 0 /*fExecOpts*/);
9994
9995 /*
9996 * Switch orgy for getting to the right handler.
9997 */
9998 VBOXSTRICTRC rcStrict;
9999 if (fRepPrefix)
10000 {
10001 switch (enmAddrMode)
10002 {
10003 case IEMMODE_16BIT:
10004 switch (cbValue)
10005 {
10006 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10007 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10008 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10009 default:
10010 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10011 }
10012 break;
10013
10014 case IEMMODE_32BIT:
10015 switch (cbValue)
10016 {
10017 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10018 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10019 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10020 default:
10021 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10022 }
10023 break;
10024
10025 case IEMMODE_64BIT:
10026 switch (cbValue)
10027 {
10028 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10029 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10030 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10031 default:
10032 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10033 }
10034 break;
10035
10036 default:
10037 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10038 }
10039 }
10040 else
10041 {
10042 switch (enmAddrMode)
10043 {
10044 case IEMMODE_16BIT:
10045 switch (cbValue)
10046 {
10047 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10048 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10049 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10050 default:
10051 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10052 }
10053 break;
10054
10055 case IEMMODE_32BIT:
10056 switch (cbValue)
10057 {
10058 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10059 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10060 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10061 default:
10062 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10063 }
10064 break;
10065
10066 case IEMMODE_64BIT:
10067 switch (cbValue)
10068 {
10069 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10070 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10071 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10072 default:
10073 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10074 }
10075 break;
10076
10077 default:
10078 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10079 }
10080 }
10081
10082 if (pVCpu->iem.s.cActiveMappings)
10083 iemMemRollback(pVCpu);
10084
10085 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10086}
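
/*
 * Illustrative usage sketch (not built): forwarding an intercepted REP OUTSB to
 * IEM via IEMExecStringIoWrite.  The wrapper name and the choice of a 32-bit
 * address mode with the default DS segment are assumptions for illustration.
 */
#if 0
static VBOXSTRICTRC hmExampleHandleRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* Byte sized port accesses, 32-bit addressing, DS segment, REP prefix,
       with the I/O port access already permission checked by the caller. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif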
10087
10088
10089/**
10090 * Interface for HM and EM for executing string I/O IN (read) instructions.
10091 *
10092 * This API ASSUMES that the caller has already verified that the guest code is
10093 * allowed to access the I/O port. (The I/O port is in the DX register in the
10094 * guest state.)
10095 *
10096 * @returns Strict VBox status code.
10097 * @param pVCpu The cross context virtual CPU structure.
10098 * @param cbValue The size of the I/O port access (1, 2, or 4).
10099 * @param enmAddrMode The addressing mode.
10100 * @param fRepPrefix Indicates whether a repeat prefix is used
10101 * (doesn't matter which for this instruction).
10102 * @param cbInstr The instruction length in bytes.
10103 * @param fIoChecked Whether the access to the I/O port has been
10104 * checked or not. It's typically checked in the
10105 * HM scenario.
10106 */
10107VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10108 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10109{
10110 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10111
10112 /*
10113 * State init.
10114 */
10115 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10116
10117 /*
10118 * Switch orgy for getting to the right handler.
10119 */
10120 VBOXSTRICTRC rcStrict;
10121 if (fRepPrefix)
10122 {
10123 switch (enmAddrMode)
10124 {
10125 case IEMMODE_16BIT:
10126 switch (cbValue)
10127 {
10128 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10129 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10130 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10131 default:
10132 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10133 }
10134 break;
10135
10136 case IEMMODE_32BIT:
10137 switch (cbValue)
10138 {
10139 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10140 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10141 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10142 default:
10143 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10144 }
10145 break;
10146
10147 case IEMMODE_64BIT:
10148 switch (cbValue)
10149 {
10150 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10151 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10152 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10153 default:
10154 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10155 }
10156 break;
10157
10158 default:
10159 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10160 }
10161 }
10162 else
10163 {
10164 switch (enmAddrMode)
10165 {
10166 case IEMMODE_16BIT:
10167 switch (cbValue)
10168 {
10169 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10170 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10171 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10172 default:
10173 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10174 }
10175 break;
10176
10177 case IEMMODE_32BIT:
10178 switch (cbValue)
10179 {
10180 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10181 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10182 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10183 default:
10184 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10185 }
10186 break;
10187
10188 case IEMMODE_64BIT:
10189 switch (cbValue)
10190 {
10191 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10192 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10193 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10194 default:
10195 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10196 }
10197 break;
10198
10199 default:
10200 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10201 }
10202 }
10203
10204 if ( pVCpu->iem.s.cActiveMappings == 0
10205 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10206 { /* likely */ }
10207 else
10208 {
10209 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10210 iemMemRollback(pVCpu);
10211 }
10212 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10213}
10214
10215
10216/**
10217 * Interface for rawmode to execute an OUT (write) instruction.
10218 *
10219 * @returns Strict VBox status code.
10220 * @param pVCpu The cross context virtual CPU structure.
10221 * @param cbInstr The instruction length in bytes.
10222 * @param u16Port The port to write to.
10223 * @param fImm Whether the port is specified using an immediate operand or
10224 * using the implicit DX register.
10225 * @param cbReg The register size.
10226 *
10227 * @remarks In ring-0 not all of the state needs to be synced in.
10228 */
10229VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10230{
10231 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10232 Assert(cbReg <= 4 && cbReg != 3);
10233
10234 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10235 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, cbReg,
10236 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10237 Assert(!pVCpu->iem.s.cActiveMappings);
10238 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10239}
10240
10241
10242/**
10243 * Interface for rawmode to execute an IN (read) instruction.
10244 *
10245 * @returns Strict VBox status code.
10246 * @param pVCpu The cross context virtual CPU structure.
10247 * @param cbInstr The instruction length in bytes.
10248 * @param u16Port The port to read.
10249 * @param fImm Whether the port is specified using an immediate operand or
10250 * using the implicit DX.
10251 * @param cbReg The register size.
10252 */
10253VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10254{
10255 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10256 Assert(cbReg <= 4 && cbReg != 3);
10257
10258 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10259 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, cbReg,
10260 ((uint8_t)fImm << 7) | 0xf /** @todo never worked with intercepts */);
10261 Assert(!pVCpu->iem.s.cActiveMappings);
10262 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10263}
10264
10265
10266/**
10267 * Interface for HM and EM to write to a CRx register.
10268 *
10269 * @returns Strict VBox status code.
10270 * @param pVCpu The cross context virtual CPU structure.
10271 * @param cbInstr The instruction length in bytes.
10272 * @param iCrReg The control register number (destination).
10273 * @param iGReg The general purpose register number (source).
10274 *
10275 * @remarks In ring-0 not all of the state needs to be synced in.
10276 */
10277VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10278{
10279 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10280 Assert(iCrReg < 16);
10281 Assert(iGReg < 16);
10282
10283 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10284 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10285 Assert(!pVCpu->iem.s.cActiveMappings);
10286 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10287}
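
/*
 * Illustrative usage sketch (not built): how an HM exit handler might forward a
 * decoded "mov crX, reg" intercept to IEM.  The wrapper name and the logging
 * are assumptions for illustration only.
 */
#if 0
static VBOXSTRICTRC hmExampleHandleCrxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
{
    /* Let IEM perform the CRx load, including all side effects and possible
       exceptions; cbInstr lets IEM advance RIP past the intercepted instruction. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
    Log(("hmExampleHandleCrxWrite: CR%u <- r%u -> %Rrc\n", iCrReg, iGReg, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif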
10288
10289
10290/**
10291 * Interface for HM and EM to read from a CRx register.
10292 *
10293 * @returns Strict VBox status code.
10294 * @param pVCpu The cross context virtual CPU structure.
10295 * @param cbInstr The instruction length in bytes.
10296 * @param iGReg The general purpose register number (destination).
10297 * @param iCrReg The control register number (source).
10298 *
10299 * @remarks In ring-0 not all of the state needs to be synced in.
10300 */
10301VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10302{
10303 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10304 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10305 | CPUMCTX_EXTRN_APIC_TPR);
10306 Assert(iCrReg < 16);
10307 Assert(iGReg < 16);
10308
10309 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10310 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10311 Assert(!pVCpu->iem.s.cActiveMappings);
10312 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10313}
10314
10315
10316/**
10317 * Interface for HM and EM to write to a DRx register.
10318 *
10319 * @returns Strict VBox status code.
10320 * @param pVCpu The cross context virtual CPU structure.
10321 * @param cbInstr The instruction length in bytes.
10322 * @param iDrReg The debug register number (destination).
10323 * @param iGReg The general purpose register number (source).
10324 *
10325 * @remarks In ring-0 not all of the state needs to be synced in.
10326 */
10327VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iDrReg, uint8_t iGReg)
10328{
10329 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10330 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10331 Assert(iDrReg < 8);
10332 Assert(iGReg < 16);
10333
10334 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10335 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Dd_Rd, iDrReg, iGReg);
10336 Assert(!pVCpu->iem.s.cActiveMappings);
10337 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10338}
10339
10340
10341/**
10342 * Interface for HM and EM to read from a DRx register.
10343 *
10344 * @returns Strict VBox status code.
10345 * @param pVCpu The cross context virtual CPU structure.
10346 * @param cbInstr The instruction length in bytes.
10347 * @param iGReg The general purpose register number (destination).
10348 * @param iDrReg The debug register number (source).
10349 *
10350 * @remarks In ring-0 not all of the state needs to be synced in.
10351 */
10352VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovDRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iDrReg)
10353{
10354 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10355 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_DR7);
10356 Assert(iDrReg < 8);
10357 Assert(iGReg < 16);
10358
10359 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10360 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Dd, iGReg, iDrReg);
10361 Assert(!pVCpu->iem.s.cActiveMappings);
10362 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10363}
10364
10365
10366/**
10367 * Interface for HM and EM to clear the CR0[TS] bit.
10368 *
10369 * @returns Strict VBox status code.
10370 * @param pVCpu The cross context virtual CPU structure.
10371 * @param cbInstr The instruction length in bytes.
10372 *
10373 * @remarks In ring-0 not all of the state needs to be synced in.
10374 */
10375VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10376{
10377 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10378
10379 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10380 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10381 Assert(!pVCpu->iem.s.cActiveMappings);
10382 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10383}
10384
10385
10386/**
10387 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10388 *
10389 * @returns Strict VBox status code.
10390 * @param pVCpu The cross context virtual CPU structure.
10391 * @param cbInstr The instruction length in bytes.
10392 * @param uValue The value to load into CR0.
10393 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10394 * memory operand. Otherwise pass NIL_RTGCPTR.
10395 *
10396 * @remarks In ring-0 not all of the state needs to be synced in.
10397 */
10398VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10399{
10400 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10401
10402 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10403 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10404 Assert(!pVCpu->iem.s.cActiveMappings);
10405 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10406}
10407
10408
10409/**
10410 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10411 *
10412 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10413 *
10414 * @returns Strict VBox status code.
10415 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10416 * @param cbInstr The instruction length in bytes.
10417 * @remarks In ring-0 not all of the state needs to be synced in.
10418 * @thread EMT(pVCpu)
10419 */
10420VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10421{
10422 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10423
10424 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10425 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10426 Assert(!pVCpu->iem.s.cActiveMappings);
10427 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10428}
10429
10430
10431/**
10432 * Interface for HM and EM to emulate the WBINVD instruction.
10433 *
10434 * @returns Strict VBox status code.
10435 * @param pVCpu The cross context virtual CPU structure.
10436 * @param cbInstr The instruction length in bytes.
10437 *
10438 * @remarks In ring-0 not all of the state needs to be synced in.
10439 */
10440VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10441{
10442 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10443
10444 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10445 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10446 Assert(!pVCpu->iem.s.cActiveMappings);
10447 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10448}
10449
10450
10451/**
10452 * Interface for HM and EM to emulate the INVD instruction.
10453 *
10454 * @returns Strict VBox status code.
10455 * @param pVCpu The cross context virtual CPU structure.
10456 * @param cbInstr The instruction length in bytes.
10457 *
10458 * @remarks In ring-0 not all of the state needs to be synced in.
10459 */
10460VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10461{
10462 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10463
10464 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10465 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10466 Assert(!pVCpu->iem.s.cActiveMappings);
10467 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10468}
10469
10470
10471/**
10472 * Interface for HM and EM to emulate the INVLPG instruction.
10473 *
10474 * @returns Strict VBox status code.
10475 * @retval VINF_PGM_SYNC_CR3
10476 *
10477 * @param pVCpu The cross context virtual CPU structure.
10478 * @param cbInstr The instruction length in bytes.
10479 * @param GCPtrPage The effective address of the page to invalidate.
10480 *
10481 * @remarks In ring-0 not all of the state needs to be synced in.
10482 */
10483VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10484{
10485 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10486
10487 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10488 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10489 Assert(!pVCpu->iem.s.cActiveMappings);
10490 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10491}
10492
10493
10494/**
10495 * Interface for HM and EM to emulate the INVPCID instruction.
10496 *
10497 * @returns Strict VBox status code.
10498 * @retval VINF_PGM_SYNC_CR3
10499 *
10500 * @param pVCpu The cross context virtual CPU structure.
10501 * @param cbInstr The instruction length in bytes.
10502 * @param iEffSeg The effective segment register.
10503 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10504 * @param uType The invalidation type.
10505 *
10506 * @remarks In ring-0 not all of the state needs to be synced in.
10507 */
10508VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10509 uint64_t uType)
10510{
10511 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10512
10513 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10514 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10515 Assert(!pVCpu->iem.s.cActiveMappings);
10516 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10517}
10518
10519
10520/**
10521 * Interface for HM and EM to emulate the CPUID instruction.
10522 *
10523 * @returns Strict VBox status code.
10524 *
10525 * @param pVCpu The cross context virtual CPU structure.
10526 * @param cbInstr The instruction length in bytes.
10527 *
10528 * @remarks Not all of the state needs to be synced in; the usual state plus RAX and RCX is required.
10529 */
10530VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10531{
10532 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10533 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10534
10535 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10536 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10537 Assert(!pVCpu->iem.s.cActiveMappings);
10538 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10539}
10540
10541
10542/**
10543 * Interface for HM and EM to emulate the RDPMC instruction.
10544 *
10545 * @returns Strict VBox status code.
10546 *
10547 * @param pVCpu The cross context virtual CPU structure.
10548 * @param cbInstr The instruction length in bytes.
10549 *
10550 * @remarks Not all of the state needs to be synced in.
10551 */
10552VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10553{
10554 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10555 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10556
10557 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10558 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10559 Assert(!pVCpu->iem.s.cActiveMappings);
10560 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10561}
10562
10563
10564/**
10565 * Interface for HM and EM to emulate the RDTSC instruction.
10566 *
10567 * @returns Strict VBox status code.
10568 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10569 *
10570 * @param pVCpu The cross context virtual CPU structure.
10571 * @param cbInstr The instruction length in bytes.
10572 *
10573 * @remarks Not all of the state needs to be synced in.
10574 */
10575VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10576{
10577 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10578 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10579
10580 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10581 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10582 Assert(!pVCpu->iem.s.cActiveMappings);
10583 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10584}
10585
10586
10587/**
10588 * Interface for HM and EM to emulate the RDTSCP instruction.
10589 *
10590 * @returns Strict VBox status code.
10591 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10592 *
10593 * @param pVCpu The cross context virtual CPU structure.
10594 * @param cbInstr The instruction length in bytes.
10595 *
10596 * @remarks Not all of the state needs to be synced in. It is recommended
10597 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10598 */
10599VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10600{
10601 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10602 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
10603
10604 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10605 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
10606 Assert(!pVCpu->iem.s.cActiveMappings);
10607 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10608}
10609
10610
10611/**
10612 * Interface for HM and EM to emulate the RDMSR instruction.
10613 *
10614 * @returns Strict VBox status code.
10615 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10616 *
10617 * @param pVCpu The cross context virtual CPU structure.
10618 * @param cbInstr The instruction length in bytes.
10619 *
10620 * @remarks Not all of the state needs to be synced in. Requires RCX and
10621 * (currently) all MSRs.
10622 */
10623VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10624{
10625 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10626 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
10627
10628 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10629 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
10630 Assert(!pVCpu->iem.s.cActiveMappings);
10631 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10632}
10633
10634
10635/**
10636 * Interface for HM and EM to emulate the WRMSR instruction.
10637 *
10638 * @returns Strict VBox status code.
10639 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10640 *
10641 * @param pVCpu The cross context virtual CPU structure.
10642 * @param cbInstr The instruction length in bytes.
10643 *
10644 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
10645 * and (currently) all MSRs.
10646 */
10647VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
10648{
10649 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10650 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
10651 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
10652
10653 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10654 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
10655 Assert(!pVCpu->iem.s.cActiveMappings);
10656 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10657}
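
/*
 * Editorial example (not part of the original source): a minimal sketch showing
 * the guest-state requirement documented above.  Before calling the WRMSR
 * wrapper the caller must have imported RCX, RAX, RDX and the MSRs into the
 * CPUM context (in addition to IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK,
 * omitted here for brevity); the check mirrors what IEM_CTX_ASSERT verifies.
 * The function name exampleForwardWrmsr is hypothetical.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC exampleForwardWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* State still marked external (not yet imported) would trip IEM_CTX_ASSERT. */
    AssertReturn(!(pVCpu->cpum.GstCtx.fExtrn & (  CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX
                                                | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS)),
                 VERR_INTERNAL_ERROR);
    return IEMExecDecodedWrmsr(pVCpu, cbInstr);
}
#endif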
10658
10659
10660/**
10661 * Interface for HM and EM to emulate the MONITOR instruction.
10662 *
10663 * @returns Strict VBox status code.
10664 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10665 *
10666 * @param pVCpu The cross context virtual CPU structure.
10667 * @param cbInstr The instruction length in bytes.
10668 *
10669 * @remarks Not all of the state needs to be synced in.
10670 * @remarks ASSUMES the default DS segment is used and that no segment
10671 * override prefixes are present.
10672 */
10673VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
10674{
10675 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10676 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
10677
10678 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10679 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
10680 Assert(!pVCpu->iem.s.cActiveMappings);
10681 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10682}
10683
10684
10685/**
10686 * Interface for HM and EM to emulate the MWAIT instruction.
10687 *
10688 * @returns Strict VBox status code.
10689 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10690 *
10691 * @param pVCpu The cross context virtual CPU structure.
10692 * @param cbInstr The instruction length in bytes.
10693 *
10694 * @remarks Not all of the state needs to be synced in.
10695 */
10696VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
10697{
10698 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10699 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
10700
10701 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10702 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
10703 Assert(!pVCpu->iem.s.cActiveMappings);
10704 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10705}
10706
10707
10708/**
10709 * Interface for HM and EM to emulate the HLT instruction.
10710 *
10711 * @returns Strict VBox status code.
10712 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10713 *
10714 * @param pVCpu The cross context virtual CPU structure.
10715 * @param cbInstr The instruction length in bytes.
10716 *
10717 * @remarks Not all of the state needs to be synced in.
10718 */
10719VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
10720{
10721 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10722
10723 iemInitExec(pVCpu, 0 /*fExecOpts*/);
10724 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
10725 Assert(!pVCpu->iem.s.cActiveMappings);
10726 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10727}
10728
10729
10730/**
10731 * Checks if IEM is in the process of delivering an event (interrupt or
10732 * exception).
10733 *
10734 * @returns true if we're in the process of raising an interrupt or exception,
10735 * false otherwise.
10736 * @param pVCpu The cross context virtual CPU structure.
10737 * @param puVector Where to store the vector associated with the
10738 * currently delivered event, optional.
10739 * @param pfFlags Where to store the event delivery flags (see
10740 * IEM_XCPT_FLAGS_XXX), optional.
10741 * @param puErr Where to store the error code associated with the
10742 * event, optional.
10743 * @param puCr2 Where to store the CR2 associated with the event,
10744 * optional.
10745 * @remarks The caller should check the flags to determine if the error code and
10746 * CR2 are valid for the event.
10747 */
10748VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
10749{
10750 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
10751 if (fRaisingXcpt)
10752 {
10753 if (puVector)
10754 *puVector = pVCpu->iem.s.uCurXcpt;
10755 if (pfFlags)
10756 *pfFlags = pVCpu->iem.s.fCurXcpt;
10757 if (puErr)
10758 *puErr = pVCpu->iem.s.uCurXcptErr;
10759 if (puCr2)
10760 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
10761 }
10762 return fRaisingXcpt;
10763}
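
/*
 * Editorial example (not part of the original source): a minimal sketch of a
 * caller querying the event currently being delivered and using the flags to
 * decide whether the error code and CR2 are meaningful.  The function name is
 * hypothetical; IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 are assumed to be
 * among the IEM_XCPT_FLAGS_XXX flags referred to above.
 */
#if 0 /* illustrative sketch, not built */
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x fFlags=%#x\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)    /* error code only valid when flagged */
            Log(("  uErr=%#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)    /* CR2 only valid when flagged */
            Log(("  uCr2=%RX64\n", uCr2));
    }
}
#endif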
10764
10765#ifdef IN_RING3
10766
10767/**
10768 * Handles the unlikely and probably fatal merge cases.
10769 *
10770 * @returns Merged status code.
10771 * @param rcStrict Current EM status code.
10772 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10773 * with @a rcStrict.
10774 * @param iMemMap The memory mapping index. For error reporting only.
10775 * @param pVCpu The cross context virtual CPU structure of the calling
10776 * thread, for error reporting only.
10777 */
10778DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
10779 unsigned iMemMap, PVMCPUCC pVCpu)
10780{
10781 if (RT_FAILURE_NP(rcStrict))
10782 return rcStrict;
10783
10784 if (RT_FAILURE_NP(rcStrictCommit))
10785 return rcStrictCommit;
10786
10787 if (rcStrict == rcStrictCommit)
10788 return rcStrictCommit;
10789
10790 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
10791 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
10792 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
10793 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
10794 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
10795 return VERR_IOM_FF_STATUS_IPE;
10796}
10797
10798
10799/**
10800 * Helper for IOMR3ProcessForceFlag.
10801 *
10802 * @returns Merged status code.
10803 * @param rcStrict Current EM status code.
10804 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
10805 * with @a rcStrict.
10806 * @param iMemMap The memory mapping index. For error reporting only.
10807 * @param pVCpu The cross context virtual CPU structure of the calling
10808 * thread, for error reporting only.
10809 */
10810DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
10811{
10812 /* Simple. */
10813 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
10814 return rcStrictCommit;
10815
10816 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
10817 return rcStrict;
10818
10819 /* EM scheduling status codes. */
10820 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
10821 && rcStrict <= VINF_EM_LAST))
10822 {
10823 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
10824 && rcStrictCommit <= VINF_EM_LAST))
10825 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
10826 }
10827
10828 /* Unlikely */
10829 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
10830}
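
/*
 * Editorial note (not part of the original source): a few merge outcomes that
 * follow directly from the logic above, written as an illustrative sketch.
 */
#if 0 /* illustrative sketch, not built */
static void exampleMergeOutcomes(PVMCPUCC pVCpu)
{
    /* A neutral EM status lets the commit status through unchanged. */
    Assert(iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RAW_TO_R3, 0, pVCpu) == VINF_EM_RAW_TO_R3);
    /* A successful commit keeps whatever EM already decided. */
    Assert(iemR3MergeStatus(VINF_EM_DBG_STEPPED, VINF_SUCCESS, 0, pVCpu) == VINF_EM_DBG_STEPPED);
    /* When both are EM scheduling codes, the numerically smaller (higher
       priority) one wins; anything else falls to iemR3MergeStatusSlow. */
}
#endif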
10831
10832
10833/**
10834 * Called by force-flag handling code when VMCPU_FF_IEM is set.
10835 *
10836 * @returns Merge between @a rcStrict and what the commit operation returned.
10837 * @param pVM The cross context VM structure.
10838 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10839 * @param rcStrict The status code returned by ring-0 or raw-mode.
10840 */
10841VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
10842{
10843 /*
10844 * Reset the pending commit.
10845 */
10846 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
10847 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
10848 ("%#x %#x %#x\n",
10849 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10850 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
10851
10852 /*
10853 * Commit the pending bounce buffers (usually just one).
10854 */
10855 unsigned cBufs = 0;
10856 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
10857 while (iMemMap-- > 0)
10858 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
10859 {
10860 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
10861 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
10862 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
10863
10864 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
10865 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
10866 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
10867
10868 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
10869 {
10870 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
10871 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
10872 pbBuf,
10873 cbFirst,
10874 PGMACCESSORIGIN_IEM);
10875 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
10876 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
10877 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
10878 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
10879 }
10880
10881 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
10882 {
10883 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
10884 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
10885 pbBuf + cbFirst,
10886 cbSecond,
10887 PGMACCESSORIGIN_IEM);
10888 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
10889 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
10890 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
10891 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
10892 }
10893 cBufs++;
10894 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
10895 }
10896
10897 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
10898 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
10899 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
10900 pVCpu->iem.s.cActiveMappings = 0;
10901 return rcStrict;
10902}
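
/*
 * Editorial example (not part of the original source): a minimal sketch of how
 * a ring-3 forced-action handler might hand a pending IEM write commit to the
 * function above.  The surrounding handler and the rcStrict variable are
 * assumptions; only the VMCPU_FF_IEM check and the call itself come from the
 * interface documented here.
 */
#if 0 /* illustrative sketch, not built */
    /* ... inside a hypothetical ring-3 forced-action handler ... */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    /* rcStrict now holds the merge of the previous status and the commit status. */
#endif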
10903
10904#endif /* IN_RING3 */
10905