VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp

Last change on this file was 104419, checked in by vboxsync on 2024-04-24

VMM/IEM: Convert near return (retn) and relative/indirect call instructions to special IEM MC statements in order to be able to recompile them, bugref:10376

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 363.4 KB
1/* $Id: IEMAllCImpl.cpp 104419 2024-04-24 14:32:29Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#define IEM_WITH_OPAQUE_DECODER_STATE
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/cpum.h>
37#include <VBox/vmm/apic.h>
38#include <VBox/vmm/pdm.h>
39#include <VBox/vmm/pgm.h>
40#include <VBox/vmm/iom.h>
41#include <VBox/vmm/em.h>
42#include <VBox/vmm/hm.h>
43#include <VBox/vmm/nem.h>
44#include <VBox/vmm/gim.h>
45#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
46# include <VBox/vmm/em.h>
47# include <VBox/vmm/hm_svm.h>
48#endif
49#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
50# include <VBox/vmm/hmvmxinline.h>
51#endif
52#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
53# include <VBox/vmm/cpuidcall.h>
54#endif
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/dbgf.h>
57#include <VBox/vmm/dbgftrace.h>
58#include "IEMInternal.h"
59#include <VBox/vmm/vmcc.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <VBox/dis.h>
64#include <iprt/asm-math.h>
65#include <iprt/assert.h>
66#include <iprt/string.h>
67#include <iprt/x86.h>
68
69#include "IEMInline.h"
70
71
72/*********************************************************************************************************************************
73* Defined Constants And Macros *
74*********************************************************************************************************************************/
75/**
76 * Flushes the prefetch buffer, light version.
77 * @todo The \#if conditions here must match the ones in iemOpcodeFlushLight().
78 */
79#ifndef IEM_WITH_CODE_TLB
80# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) iemOpcodeFlushLight(a_pVCpu, a_cbInstr)
81#else
82# define IEM_FLUSH_PREFETCH_LIGHT(a_pVCpu, a_cbInstr) do { } while (0)
83#endif
84
85/**
86 * Flushes the prefetch buffer, heavy version.
87 * @todo The \#if conditions here must match the ones in iemOpcodeFlushHeavy().
88 */
89#if !defined(IEM_WITH_CODE_TLB) || 1
90# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) iemOpcodeFlushHeavy(a_pVCpu, a_cbInstr)
91#else
92# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) do { } while (0)
93#endif
94
95
96
97/** @name Misc Helpers
98 * @{
99 */
100
101
102/**
103 * Worker function for iemHlpCheckPortIOPermission, don't call directly.
104 *
105 * @returns Strict VBox status code.
106 *
107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
108 * @param u16Port The port number.
109 * @param cbOperand The operand size.
110 */
111static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
112{
113 /* The TSS bits we're interested in are the same on 386 and AMD64. */
114 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_BUSY == X86_SEL_TYPE_SYS_386_TSS_BUSY);
115 AssertCompile(AMD64_SEL_TYPE_SYS_TSS_AVAIL == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
116 AssertCompileMembersAtSameOffset(X86TSS32, offIoBitmap, X86TSS64, offIoBitmap);
117 AssertCompile(sizeof(X86TSS32) == sizeof(X86TSS64));
118
119 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
120
121 /*
122 * Check the TSS type; 16-bit TSSes don't have an I/O permission bitmap.
123 */
124 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
125 if (RT_UNLIKELY( pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_BUSY
126 && pVCpu->cpum.GstCtx.tr.Attr.n.u4Type != AMD64_SEL_TYPE_SYS_TSS_AVAIL))
127 {
128 Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
129 u16Port, cbOperand, pVCpu->cpum.GstCtx.tr.Attr.n.u4Type, pVCpu->cpum.GstCtx.tr.Attr.u));
130 return iemRaiseGeneralProtectionFault0(pVCpu);
131 }
132
133 /*
134 * Read the bitmap offset (may #PF).
135 */
136 uint16_t offBitmap;
137 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &offBitmap, UINT8_MAX,
138 pVCpu->cpum.GstCtx.tr.u64Base + RT_UOFFSETOF(X86TSS64, offIoBitmap));
139 if (rcStrict != VINF_SUCCESS)
140 {
141 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
142 return rcStrict;
143 }
144
145 /*
146 * We need the bit range from u16Port to (u16Port + cbOperand - 1); however, Intel
147 * describes the CPU as actually reading two bytes regardless of whether the
148 * bit range crosses a byte boundary. Thus the + 1 in the test below.
149 */
150 uint32_t offFirstBit = (uint32_t)u16Port / 8 + offBitmap;
151 /** @todo check if real CPUs ensure that offBitmap has a minimum value of,
152 * for instance, sizeof(X86TSS32). */
153 if (offFirstBit + 1 > pVCpu->cpum.GstCtx.tr.u32Limit) /* the limit is inclusive */
154 {
155 Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
156 offFirstBit, pVCpu->cpum.GstCtx.tr.u32Limit));
157 return iemRaiseGeneralProtectionFault0(pVCpu);
158 }
159
160 /*
161 * Read the necessary bits.
162 */
163 /** @todo Test the assertion in the Intel manual that the CPU reads two
164 * bytes. The question is how this works wrt \#PF and \#GP on the
165 * 2nd byte when it's not required. */
166 uint16_t bmBytes = UINT16_MAX;
167 rcStrict = iemMemFetchSysU16(pVCpu, &bmBytes, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + offFirstBit);
168 if (rcStrict != VINF_SUCCESS)
169 {
170 Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
171 return rcStrict;
172 }
173
174 /*
175 * Perform the check.
176 */
177 uint16_t fPortMask = (1 << cbOperand) - 1;
178 bmBytes >>= (u16Port & 7);
179 if (bmBytes & fPortMask)
180 {
181 Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
182 u16Port, cbOperand, bmBytes, fPortMask));
183 return iemRaiseGeneralProtectionFault0(pVCpu);
184 }
185
186 return VINF_SUCCESS;
187}
188
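/*
 * Note: The following is a minimal illustrative sketch of the bitmap math used by
 * iemHlpCheckPortIOPermissionBitmap above, with the guest I/O bitmap modelled as a
 * plain byte array.  It is never built; the helper name and the pabBitmap parameter
 * are made up for illustration only.
 */
#if 0 /* illustrative sketch, not used */
static bool iemHlpSketchIsIoPortPermitted(uint8_t const *pabBitmap, uint16_t u16Port, uint8_t cbOperand)
{
    /* The bits for ports u16Port .. u16Port + cbOperand - 1 start in byte u16Port / 8;
       two bytes are always fetched, which is why the limit test above uses offFirstBit + 1. */
    uint16_t const bmBytes   = pabBitmap[u16Port / 8] | ((uint16_t)pabBitmap[u16Port / 8 + 1] << 8);
    uint16_t const fPortMask = (uint16_t)((1 << cbOperand) - 1);
    return !((bmBytes >> (u16Port & 7)) & fPortMask); /* permitted only if all relevant bits are clear */
}
#endif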
189
190/**
191 * Checks if we are allowed to access the given I/O port, raising the
192 * appropriate exceptions if we aren't (or if the I/O bitmap is not
193 * accessible).
194 *
195 * @returns Strict VBox status code.
196 *
197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
198 * @param u16Port The port number.
199 * @param cbOperand The operand size.
200 */
201DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PVMCPUCC pVCpu, uint16_t u16Port, uint8_t cbOperand)
202{
203 X86EFLAGS Efl;
204 Efl.u = IEMMISC_GET_EFL(pVCpu);
205 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
206 && ( IEM_GET_CPL(pVCpu) > Efl.Bits.u2IOPL
207 || Efl.Bits.u1VM) )
208 return iemHlpCheckPortIOPermissionBitmap(pVCpu, u16Port, cbOperand);
209 return VINF_SUCCESS;
210}
211
212
213#if 0
214/**
215 * Calculates the parity bit.
216 *
217 * @returns true if the bit is set, false if not.
218 * @param u8Result The least significant byte of the result.
219 */
220static bool iemHlpCalcParityFlag(uint8_t u8Result)
221{
222 /*
223 * Parity is set if the number of bits in the least significant byte of
224 * the result is even.
225 */
226 uint8_t cBits;
227 cBits = u8Result & 1; /* 0 */
228 u8Result >>= 1;
229 cBits += u8Result & 1;
230 u8Result >>= 1;
231 cBits += u8Result & 1;
232 u8Result >>= 1;
233 cBits += u8Result & 1;
234 u8Result >>= 1;
235 cBits += u8Result & 1; /* 4 */
236 u8Result >>= 1;
237 cBits += u8Result & 1;
238 u8Result >>= 1;
239 cBits += u8Result & 1;
240 u8Result >>= 1;
241 cBits += u8Result & 1;
242 return !(cBits & 1);
243}
244#endif /* not used */
245
246
247/**
248 * Updates the specified flags according to an 8-bit result.
249 *
250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
251 * @param u8Result The result to set the flags according to.
252 * @param fToUpdate The flags to update.
253 * @param fUndefined The flags that are specified as undefined.
254 */
255static void iemHlpUpdateArithEFlagsU8(PVMCPUCC pVCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
256{
257 uint32_t fEFlags = iemAImpl_test_u8(pVCpu->cpum.GstCtx.eflags.u, &u8Result, u8Result);
258 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
259 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
260}
261
262
263/**
264 * Updates the specified flags according to a 16-bit result.
265 *
266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
267 * @param u16Result The result to set the flags according to.
268 * @param fToUpdate The flags to update.
269 * @param fUndefined The flags that are specified as undefined.
270 */
271static void iemHlpUpdateArithEFlagsU16(PVMCPUCC pVCpu, uint16_t u16Result, uint32_t fToUpdate, uint32_t fUndefined)
272{
273 uint32_t fEFlags = iemAImpl_test_u16(pVCpu->cpum.GstCtx.eflags.u, &u16Result, u16Result);
274 pVCpu->cpum.GstCtx.eflags.u &= ~(fToUpdate | fUndefined);
275 pVCpu->cpum.GstCtx.eflags.u |= (fToUpdate | fUndefined) & fEFlags;
276}
277
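/*
 * Note on the two helpers above: they recompute a complete EFLAGS image via the TEST
 * worker and then copy back only the bits listed in fToUpdate | fUndefined, so any
 * flag outside that set keeps the value it had before the call.
 */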
278
279/**
280 * Helper used by iret.
281 *
282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
283 * @param uCpl The new CPL.
284 * @param pSReg Pointer to the segment register.
285 */
286static void iemHlpAdjustSelectorForNewCpl(PVMCPUCC pVCpu, uint8_t uCpl, PCPUMSELREG pSReg)
287{
288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
289 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
290
291 if ( uCpl > pSReg->Attr.n.u2Dpl
292 && pSReg->Attr.n.u1DescType /* code or data, not system */
293 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
294 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
295 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, 0);
296}
297
298
299/**
300 * Indicates that we have modified the FPU state.
301 *
302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
303 */
304DECLINLINE(void) iemHlpUsedFpu(PVMCPUCC pVCpu)
305{
306 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
307}
308
309/** @} */
310
311/** @name C Implementations
312 * @{
313 */
314
315
316/**
317 * Implements a pop [mem16].
318 */
319IEM_CIMPL_DEF_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
320{
321 uint16_t u16Value;
322 RTUINT64U TmpRsp;
323 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
324 VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
325 if (rcStrict == VINF_SUCCESS)
326 {
327 rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
328 if (rcStrict == VINF_SUCCESS)
329 {
330 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
331 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
332 }
333 }
334 return rcStrict;
335
336}
337
338
339/**
340 * Implements a pop [mem32].
341 */
342IEM_CIMPL_DEF_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
343{
344 uint32_t u32Value;
345 RTUINT64U TmpRsp;
346 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
347 VBOXSTRICTRC rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
348 if (rcStrict == VINF_SUCCESS)
349 {
350 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEffDst, u32Value);
351 if (rcStrict == VINF_SUCCESS)
352 {
353 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
354 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
355 }
356 }
357 return rcStrict;
358
359}
360
361
362/**
363 * Implements a pop [mem64].
364 */
365IEM_CIMPL_DEF_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
366{
367 uint64_t u64Value;
368 RTUINT64U TmpRsp;
369 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
370 VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
371 if (rcStrict == VINF_SUCCESS)
372 {
373 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrEffDst, u64Value);
374 if (rcStrict == VINF_SUCCESS)
375 {
376 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
377 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
378 }
379 }
380 return rcStrict;
381
382}
383
384
385/**
386 * Implements a 16-bit popa.
387 */
388IEM_CIMPL_DEF_0(iemCImpl_popa_16)
389{
390 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
391 RTGCPTR GCPtrLast = GCPtrStart + 15;
392 VBOXSTRICTRC rcStrict;
393
394 /*
395 * The docs are a bit hard to comprehend here, but it looks like we wrap
396 * around in real mode as long as none of the individual "popa" crosses the
397 * end of the stack segment. In protected mode we check the whole access
398 * in one go. For efficiency, only do the word-by-word thing if we're in
399 * danger of wrapping around.
400 */
401 /** @todo do popa boundary / wrap-around checks. */
402 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
403 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
404 {
405 /* word-by-word */
406 RTUINT64U TmpRsp;
407 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
408 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.di, &TmpRsp);
409 if (rcStrict == VINF_SUCCESS)
410 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.si, &TmpRsp);
411 if (rcStrict == VINF_SUCCESS)
412 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bp, &TmpRsp);
413 if (rcStrict == VINF_SUCCESS)
414 {
415 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
416 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.bx, &TmpRsp);
417 }
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.dx, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.cx, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 rcStrict = iemMemStackPopU16Ex(pVCpu, &pVCpu->cpum.GstCtx.ax, &TmpRsp);
424 if (rcStrict == VINF_SUCCESS)
425 {
426 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
427 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
428 }
429 }
430 else
431 {
432 uint8_t bUnmapInfo;
433 uint16_t const *pau16Mem = NULL;
434 rcStrict = iemMemMap(pVCpu, (void **)&pau16Mem, &bUnmapInfo, 16, X86_SREG_SS, GCPtrStart,
435 IEM_ACCESS_STACK_R, sizeof(*pau16Mem) - 1);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 pVCpu->cpum.GstCtx.di = pau16Mem[7 - X86_GREG_xDI];
439 pVCpu->cpum.GstCtx.si = pau16Mem[7 - X86_GREG_xSI];
440 pVCpu->cpum.GstCtx.bp = pau16Mem[7 - X86_GREG_xBP];
441 /* skip sp */
442 pVCpu->cpum.GstCtx.bx = pau16Mem[7 - X86_GREG_xBX];
443 pVCpu->cpum.GstCtx.dx = pau16Mem[7 - X86_GREG_xDX];
444 pVCpu->cpum.GstCtx.cx = pau16Mem[7 - X86_GREG_xCX];
445 pVCpu->cpum.GstCtx.ax = pau16Mem[7 - X86_GREG_xAX];
446 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
447 if (rcStrict == VINF_SUCCESS)
448 {
449 iemRegAddToRsp(pVCpu, 16);
450 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
451 }
452 }
453 }
454 return rcStrict;
455}
456
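/*
 * Note on the block-mapping path above: the 16-byte POPA image is laid out, from the
 * lowest stack address (GCPtrStart) upwards, as DI, SI, BP, SP (discarded), BX, DX,
 * CX, AX, i.e. the reverse of the PUSHA push order, which is what the
 * pau16Mem[7 - X86_GREG_xXX] indexing expresses.
 */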
457
458/**
459 * Implements a 32-bit popa.
460 */
461IEM_CIMPL_DEF_0(iemCImpl_popa_32)
462{
463 RTGCPTR GCPtrStart = iemRegGetEffRsp(pVCpu);
464 RTGCPTR GCPtrLast = GCPtrStart + 31;
465 VBOXSTRICTRC rcStrict;
466
467 /*
468 * The docs are a bit hard to comprehend here, but it looks like we wrap
469 * around in real mode as long as none of the individual "popa" crosses the
470 * end of the stack segment. In protected mode we check the whole access
471 * in one go. For efficiency, only do the word-by-word thing if we're in
472 * danger of wrapping around.
473 */
474 /** @todo do popa boundary / wrap-around checks. */
475 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pVCpu)
476 && (pVCpu->cpum.GstCtx.cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
477 {
478 /* word-by-word */
479 RTUINT64U TmpRsp;
480 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
481 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edi, &TmpRsp);
482 if (rcStrict == VINF_SUCCESS)
483 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.esi, &TmpRsp);
484 if (rcStrict == VINF_SUCCESS)
485 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebp, &TmpRsp);
486 if (rcStrict == VINF_SUCCESS)
487 {
488 iemRegAddToRspEx(pVCpu, &TmpRsp, 2); /* sp */
489 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ebx, &TmpRsp);
490 }
491 if (rcStrict == VINF_SUCCESS)
492 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.edx, &TmpRsp);
493 if (rcStrict == VINF_SUCCESS)
494 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.ecx, &TmpRsp);
495 if (rcStrict == VINF_SUCCESS)
496 rcStrict = iemMemStackPopU32Ex(pVCpu, &pVCpu->cpum.GstCtx.eax, &TmpRsp);
497 if (rcStrict == VINF_SUCCESS)
498 {
499#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
500 pVCpu->cpum.GstCtx.rdi &= UINT32_MAX;
501 pVCpu->cpum.GstCtx.rsi &= UINT32_MAX;
502 pVCpu->cpum.GstCtx.rbp &= UINT32_MAX;
503 pVCpu->cpum.GstCtx.rbx &= UINT32_MAX;
504 pVCpu->cpum.GstCtx.rdx &= UINT32_MAX;
505 pVCpu->cpum.GstCtx.rcx &= UINT32_MAX;
506 pVCpu->cpum.GstCtx.rax &= UINT32_MAX;
507#endif
508 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
509 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
510 }
511 }
512 else
513 {
514 uint8_t bUnmapInfo;
515 uint32_t const *pau32Mem;
516 rcStrict = iemMemMap(pVCpu, (void **)&pau32Mem, &bUnmapInfo, 32, X86_SREG_SS, GCPtrStart,
517 IEM_ACCESS_STACK_R, sizeof(*pau32Mem) - 1);
518 if (rcStrict == VINF_SUCCESS)
519 {
520 pVCpu->cpum.GstCtx.rdi = pau32Mem[7 - X86_GREG_xDI];
521 pVCpu->cpum.GstCtx.rsi = pau32Mem[7 - X86_GREG_xSI];
522 pVCpu->cpum.GstCtx.rbp = pau32Mem[7 - X86_GREG_xBP];
523 /* skip esp */
524 pVCpu->cpum.GstCtx.rbx = pau32Mem[7 - X86_GREG_xBX];
525 pVCpu->cpum.GstCtx.rdx = pau32Mem[7 - X86_GREG_xDX];
526 pVCpu->cpum.GstCtx.rcx = pau32Mem[7 - X86_GREG_xCX];
527 pVCpu->cpum.GstCtx.rax = pau32Mem[7 - X86_GREG_xAX];
528 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
529 if (rcStrict == VINF_SUCCESS)
530 {
531 iemRegAddToRsp(pVCpu, 32);
532 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
533 }
534 }
535 }
536 return rcStrict;
537}
538
539
540/**
541 * Implements a 16-bit pusha.
542 */
543IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
544{
545 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
546 RTGCPTR GCPtrBottom = GCPtrTop - 15;
547 VBOXSTRICTRC rcStrict;
548
549 /*
550 * The docs are a bit hard to comprehend here, but it looks like we wrap
551 * around in real mode as long as none of the individual "pusha" crosses the
552 * end of the stack segment. In protected mode we check the whole access
553 * in one go. For efficiency, only do the word-by-word thing if we're in
554 * danger of wrapping around.
555 */
556 /** @todo do pusha boundary / wrap-around checks. */
557 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
558 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
559 {
560 /* word-by-word */
561 RTUINT64U TmpRsp;
562 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
563 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.ax, &TmpRsp);
564 if (rcStrict == VINF_SUCCESS)
565 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.cx, &TmpRsp);
566 if (rcStrict == VINF_SUCCESS)
567 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.dx, &TmpRsp);
568 if (rcStrict == VINF_SUCCESS)
569 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bx, &TmpRsp);
570 if (rcStrict == VINF_SUCCESS)
571 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.sp, &TmpRsp);
572 if (rcStrict == VINF_SUCCESS)
573 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.bp, &TmpRsp);
574 if (rcStrict == VINF_SUCCESS)
575 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.si, &TmpRsp);
576 if (rcStrict == VINF_SUCCESS)
577 rcStrict = iemMemStackPushU16Ex(pVCpu, pVCpu->cpum.GstCtx.di, &TmpRsp);
578 if (rcStrict == VINF_SUCCESS)
579 {
580 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
581 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
582 }
583 }
584 else
585 {
586 GCPtrBottom--;
587 uint8_t bUnmapInfo;
588 uint16_t *pau16Mem = NULL;
589 rcStrict = iemMemMap(pVCpu, (void **)&pau16Mem, &bUnmapInfo, 16, X86_SREG_SS, GCPtrBottom,
590 IEM_ACCESS_STACK_W, sizeof(*pau16Mem) - 1);
591 if (rcStrict == VINF_SUCCESS)
592 {
593 pau16Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.di;
594 pau16Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.si;
595 pau16Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.bp;
596 pau16Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.sp;
597 pau16Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.bx;
598 pau16Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.dx;
599 pau16Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.cx;
600 pau16Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.ax;
601 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
602 if (rcStrict == VINF_SUCCESS)
603 {
604 iemRegSubFromRsp(pVCpu, 16);
605 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
606 }
607 }
608 }
609 return rcStrict;
610}
611
612
613/**
614 * Implements a 32-bit pusha.
615 */
616IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
617{
618 RTGCPTR GCPtrTop = iemRegGetEffRsp(pVCpu);
619 RTGCPTR GCPtrBottom = GCPtrTop - 31;
620 VBOXSTRICTRC rcStrict;
621
622 /*
623 * The docs are a bit hard to comprehend here, but it looks like we wrap
624 * around in real mode as long as none of the individual "pusha" crosses the
625 * end of the stack segment. In protected mode we check the whole access
626 * in one go. For efficiency, only do the word-by-word thing if we're in
627 * danger of wrapping around.
628 */
629 /** @todo do pusha boundary / wrap-around checks. */
630 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
631 && IEM_IS_REAL_OR_V86_MODE(pVCpu) ) )
632 {
633 /* word-by-word */
634 RTUINT64U TmpRsp;
635 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
636 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.eax, &TmpRsp);
637 if (rcStrict == VINF_SUCCESS)
638 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ecx, &TmpRsp);
639 if (rcStrict == VINF_SUCCESS)
640 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edx, &TmpRsp);
641 if (rcStrict == VINF_SUCCESS)
642 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebx, &TmpRsp);
643 if (rcStrict == VINF_SUCCESS)
644 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esp, &TmpRsp);
645 if (rcStrict == VINF_SUCCESS)
646 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.ebp, &TmpRsp);
647 if (rcStrict == VINF_SUCCESS)
648 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.esi, &TmpRsp);
649 if (rcStrict == VINF_SUCCESS)
650 rcStrict = iemMemStackPushU32Ex(pVCpu, pVCpu->cpum.GstCtx.edi, &TmpRsp);
651 if (rcStrict == VINF_SUCCESS)
652 {
653 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
654 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
655 }
656 }
657 else
658 {
659 GCPtrBottom--;
660 uint8_t bUnmapInfo;
661 uint32_t *pau32Mem;
662 rcStrict = iemMemMap(pVCpu, (void **)&pau32Mem, &bUnmapInfo, 32, X86_SREG_SS, GCPtrBottom,
663 IEM_ACCESS_STACK_W, sizeof(*pau32Mem) - 1);
664 if (rcStrict == VINF_SUCCESS)
665 {
666 pau32Mem[7 - X86_GREG_xDI] = pVCpu->cpum.GstCtx.edi;
667 pau32Mem[7 - X86_GREG_xSI] = pVCpu->cpum.GstCtx.esi;
668 pau32Mem[7 - X86_GREG_xBP] = pVCpu->cpum.GstCtx.ebp;
669 pau32Mem[7 - X86_GREG_xSP] = pVCpu->cpum.GstCtx.esp;
670 pau32Mem[7 - X86_GREG_xBX] = pVCpu->cpum.GstCtx.ebx;
671 pau32Mem[7 - X86_GREG_xDX] = pVCpu->cpum.GstCtx.edx;
672 pau32Mem[7 - X86_GREG_xCX] = pVCpu->cpum.GstCtx.ecx;
673 pau32Mem[7 - X86_GREG_xAX] = pVCpu->cpum.GstCtx.eax;
674 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
675 if (rcStrict == VINF_SUCCESS)
676 {
677 iemRegSubFromRsp(pVCpu, 32);
678 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
679 }
680 }
681 }
682 return rcStrict;
683}
684
685
686/**
687 * Implements pushf.
688 *
689 *
690 * @param enmEffOpSize The effective operand size.
691 */
692IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
693{
694 VBOXSTRICTRC rcStrict;
695
696 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_PUSHF))
697 { /* probable */ }
698 else
699 {
700 Log2(("pushf: Guest intercept -> #VMEXIT\n"));
701 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
702 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
703 }
704
705 /*
706 * If we're in V8086 mode some care is required (which is why we're
707 * doing this in a C implementation).
708 */
709 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
710 if ( (fEfl & X86_EFL_VM)
711 && X86_EFL_GET_IOPL(fEfl) != 3 )
712 {
713 Assert(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE);
714 if ( enmEffOpSize != IEMMODE_16BIT
715 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
716 return iemRaiseGeneralProtectionFault0(pVCpu);
717 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
718 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
719 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
720 }
721 else
722 {
723
724 /*
725 * Ok, clear RF and VM, adjust for ancient CPUs, and push the flags.
726 */
727 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
728
729 switch (enmEffOpSize)
730 {
731 case IEMMODE_16BIT:
732 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
733 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_186)
734 fEfl |= UINT16_C(0xf000);
735 rcStrict = iemMemStackPushU16(pVCpu, (uint16_t)fEfl);
736 break;
737 case IEMMODE_32BIT:
738 rcStrict = iemMemStackPushU32(pVCpu, fEfl);
739 break;
740 case IEMMODE_64BIT:
741 rcStrict = iemMemStackPushU64(pVCpu, fEfl);
742 break;
743 IEM_NOT_REACHED_DEFAULT_CASE_RET();
744 }
745 }
746
747 if (rcStrict == VINF_SUCCESS)
748 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
749 return rcStrict;
750}
751
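/*
 * Note on the CR4.VME path above: X86_EFL_VIF is bit 19 and X86_EFL_IF is bit 9, so
 * the ">> (19 - 9)" copies the virtual interrupt flag into the IF position of the
 * 16-bit FLAGS image being pushed, after the real IF has been masked out.
 */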
752
753/**
754 * Implements popf.
755 *
756 * @param enmEffOpSize The effective operand size.
757 */
758IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
759{
760 uint32_t const fEflOld = IEMMISC_GET_EFL(pVCpu);
761 VBOXSTRICTRC rcStrict;
762 uint32_t fEflNew;
763
764 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_POPF))
765 { /* probable */ }
766 else
767 {
768 Log2(("popf: Guest intercept -> #VMEXIT\n"));
769 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
770 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
771 }
772
773 /*
774 * V8086 is special as usual.
775 */
776 if (fEflOld & X86_EFL_VM)
777 {
778 /*
779 * Almost anything goes if IOPL is 3.
780 */
781 if (X86_EFL_GET_IOPL(fEflOld) == 3)
782 {
783 switch (enmEffOpSize)
784 {
785 case IEMMODE_16BIT:
786 {
787 uint16_t u16Value;
788 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
789 if (rcStrict != VINF_SUCCESS)
790 return rcStrict;
791 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
792 break;
793 }
794 case IEMMODE_32BIT:
795 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
796 if (rcStrict != VINF_SUCCESS)
797 return rcStrict;
798 break;
799 IEM_NOT_REACHED_DEFAULT_CASE_RET();
800 }
801
802 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
803 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
804 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
805 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
806 }
807 /*
808 * Interrupt flag virtualization with CR4.VME=1.
809 */
810 else if ( enmEffOpSize == IEMMODE_16BIT
811 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
812 {
813 uint16_t u16Value;
814 RTUINT64U TmpRsp;
815 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
816 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Value, &TmpRsp);
817 if (rcStrict != VINF_SUCCESS)
818 return rcStrict;
819
820 /** @todo Is the popf VME \#GP(0) delivered after updating RSP+RIP
821 * or before? */
822 if ( ( (u16Value & X86_EFL_IF)
823 && (fEflOld & X86_EFL_VIP))
824 || (u16Value & X86_EFL_TF) )
825 return iemRaiseGeneralProtectionFault0(pVCpu);
826
827 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
828 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
829 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
830 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
831
832 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
833 }
834 else
835 return iemRaiseGeneralProtectionFault0(pVCpu);
836
837 }
838 /*
839 * Not in V8086 mode.
840 */
841 else
842 {
843 /* Pop the flags. */
844 switch (enmEffOpSize)
845 {
846 case IEMMODE_16BIT:
847 {
848 uint16_t u16Value;
849 rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
850 if (rcStrict != VINF_SUCCESS)
851 return rcStrict;
852 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
853
854 /*
855 * Ancient CPU adjustments:
856 * - 8086, 80186, V20/30:
857 * The fixed bits 15:12 are not kept correctly internally, mostly for
858 * practical reasons (masking below). We add them when pushing flags.
859 * - 80286:
860 * The NT and IOPL flags cannot be popped from real mode and are
861 * therefore always zero (since a 286 can never exit from PM and
862 * their initial value is zero). This changed on a 386 and can
863 * therefore be used to distinguish a 286 from a 386 CPU in real mode.
864 */
865 if ( IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286
866 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
867 fEflNew &= ~(X86_EFL_NT | X86_EFL_IOPL);
868 break;
869 }
870 case IEMMODE_32BIT:
871 rcStrict = iemMemStackPopU32(pVCpu, &fEflNew);
872 if (rcStrict != VINF_SUCCESS)
873 return rcStrict;
874 break;
875 case IEMMODE_64BIT:
876 {
877 uint64_t u64Value;
878 rcStrict = iemMemStackPopU64(pVCpu, &u64Value);
879 if (rcStrict != VINF_SUCCESS)
880 return rcStrict;
881 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
882 break;
883 }
884 IEM_NOT_REACHED_DEFAULT_CASE_RET();
885 }
886
887 /* Merge them with the current flags. */
888 const uint32_t fPopfBits = pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.enmMicroarch != kCpumMicroarch_Intel_80386
889 ? X86_EFL_POPF_BITS : X86_EFL_POPF_BITS_386;
890 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
891 || IEM_GET_CPL(pVCpu) == 0)
892 {
893 fEflNew &= fPopfBits;
894 fEflNew |= ~fPopfBits & fEflOld;
895 }
896 else if (IEM_GET_CPL(pVCpu) <= X86_EFL_GET_IOPL(fEflOld))
897 {
898 fEflNew &= fPopfBits & ~(X86_EFL_IOPL);
899 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL)) & fEflOld;
900 }
901 else
902 {
903 fEflNew &= fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF);
904 fEflNew |= ~(fPopfBits & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
905 }
906 }
907
908 /*
909 * Commit the flags.
910 */
911 Assert(fEflNew & RT_BIT_32(1));
912 IEMMISC_SET_EFL(pVCpu, fEflNew);
913 return iemRegAddToRipAndFinishingClearingRfEx(pVCpu, cbInstr, fEflOld);
914}
915
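/*
 * Note: The protected-mode merge above can be summarised as follows.  This is a
 * minimal sketch that is never built; the function name and parameters are made up
 * for illustration only.
 */
#if 0 /* illustrative sketch, not used */
static uint32_t iemSketchPopfMergeEFlags(uint32_t fEflOld, uint32_t fEflPopped, uint32_t fPopfBits, uint8_t uCpl)
{
    uint32_t fMask = fPopfBits;                         /* bits POPF may change at all on this CPU generation */
    if (uCpl != 0)
        fMask &= ~X86_EFL_IOPL;                         /* only ring 0 may change IOPL */
    if (uCpl > X86_EFL_GET_IOPL(fEflOld))
        fMask &= ~X86_EFL_IF;                           /* changing IF requires CPL <= IOPL */
    return (fEflPopped & fMask) | (fEflOld & ~fMask);   /* everything else keeps its old value */
}
#endif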
916
917/**
918 * Implements far jumps and calls thru task segments (TSS).
919 *
920 * @returns VBox strict status code.
921 * @param pVCpu The cross context virtual CPU structure of the
922 * calling thread.
923 * @param cbInstr The current instruction length.
924 * @param uSel The selector.
925 * @param enmBranch The kind of branching we're performing.
926 * @param enmEffOpSize The effective operand size.
927 * @param pDesc The descriptor corresponding to @a uSel. The type is
928 * an available TSS.
929 */
930static VBOXSTRICTRC iemCImpl_BranchTaskSegment(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
931 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
932{
933#ifndef IEM_IMPLEMENTS_TASKSWITCH
934 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
935#else
936 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
937 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
938 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL);
939 RT_NOREF_PV(enmEffOpSize);
940 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
941
942 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
943 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
944 {
945 Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
946 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
947 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
948 }
949
950 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
951 * far calls (see iemCImpl_callf). Most likely in both cases it should be
952 * checked here, need testcases. */
953 if (!pDesc->Legacy.Gen.u1Present)
954 {
955 Log(("BranchTaskSegment TSS not present uSel=%04x -> #NP\n", uSel));
956 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
957 }
958
959 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
960 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
961 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSel, pDesc);
962#endif
963}
964
965
966/**
967 * Implements far jumps and calls thru task gates.
968 *
969 * @returns VBox strict status code.
970 * @param pVCpu The cross context virtual CPU structure of the
971 * calling thread.
972 * @param cbInstr The current instruction length.
973 * @param uSel The selector.
974 * @param enmBranch The kind of branching we're performing.
975 * @param enmEffOpSize The effective operand size.
976 * @param pDesc The descriptor corresponding to @a uSel. The type is
977 * task gate.
978 */
979static VBOXSTRICTRC iemCImpl_BranchTaskGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
980 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
981{
982#ifndef IEM_IMPLEMENTS_TASKSWITCH
983 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
984#else
985 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
986 RT_NOREF_PV(enmEffOpSize);
987 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
988
989 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
990 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
991 {
992 Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
993 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
994 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
995 }
996
997 /** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
998 * far calls (see iemCImpl_callf). Most likely in both cases it should be
999 * checked here, need testcases. */
1000 if (!pDesc->Legacy.Gen.u1Present)
1001 {
1002 Log(("BranchTaskSegment segment not present uSel=%04x -> #NP\n", uSel));
1003 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1004 }
1005
1006 /*
1007 * Fetch the new TSS descriptor from the GDT.
1008 */
1009 RTSEL uSelTss = pDesc->Legacy.Gate.u16Sel;
1010 if (uSelTss & X86_SEL_LDT)
1011 {
1012 Log(("BranchTaskGate TSS is in LDT. uSel=%04x uSelTss=%04x -> #GP\n", uSel, uSelTss));
1013 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1014 }
1015
1016 IEMSELDESC TssDesc;
1017 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelTss, X86_XCPT_GP);
1018 if (rcStrict != VINF_SUCCESS)
1019 return rcStrict;
1020
1021 if (TssDesc.Legacy.Gate.u4Type & X86_SEL_TYPE_SYS_TSS_BUSY_MASK)
1022 {
1023 Log(("BranchTaskGate TSS is busy. uSel=%04x uSelTss=%04x DescType=%#x -> #GP\n", uSel, uSelTss,
1024 TssDesc.Legacy.Gate.u4Type));
1025 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
1026 }
1027
1028 if (!TssDesc.Legacy.Gate.u1Present)
1029 {
1030 Log(("BranchTaskGate TSS is not present. uSel=%04x uSelTss=%04x -> #NP\n", uSel, uSelTss));
1031 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelTss & X86_SEL_MASK_OFF_RPL);
1032 }
1033
1034 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
1035 return iemTaskSwitch(pVCpu, enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
1036 uNextEip, 0 /* fFlags */, 0 /* uErr */, 0 /* uCr2 */, uSelTss, &TssDesc);
1037#endif
1038}
1039
1040
1041/**
1042 * Implements far jumps and calls thru call gates.
1043 *
1044 * @returns VBox strict status code.
1045 * @param pVCpu The cross context virtual CPU structure of the
1046 * calling thread.
1047 * @param cbInstr The current instruction length.
1048 * @param uSel The selector.
1049 * @param enmBranch The kind of branching we're performing.
1050 * @param enmEffOpSize The effective operand size.
1051 * @param pDesc The descriptor corresponding to @a uSel. The type is
1052 * call gate.
1053 */
1054static VBOXSTRICTRC iemCImpl_BranchCallGate(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1055 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1056{
1057#define IEM_IMPLEMENTS_CALLGATE
1058#ifndef IEM_IMPLEMENTS_CALLGATE
1059 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
1060#else
1061 RT_NOREF_PV(enmEffOpSize);
1062 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1063
1064 /* NB: Far jumps can only do intra-privilege transfers. Far calls support
1065 * inter-privilege calls and are much more complex.
1066 *
1067 * NB: 64-bit call gate has the same type as a 32-bit call gate! If
1068 * EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
1069 * must be 16-bit or 32-bit.
1070 */
1071 /** @todo effective operand size is probably irrelevant here, only the
1072 * call gate bitness matters??
1073 */
1074 VBOXSTRICTRC rcStrict;
1075 RTPTRUNION uPtrRet;
1076 uint64_t uNewRsp;
1077 uint64_t uNewRip;
1078 uint64_t u64Base;
1079 uint32_t cbLimit;
1080 RTSEL uNewCS;
1081 IEMSELDESC DescCS;
1082
1083 AssertCompile(X86_SEL_TYPE_SYS_386_CALL_GATE == AMD64_SEL_TYPE_SYS_CALL_GATE);
1084 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1085 Assert( pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE
1086 || pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE);
1087
1088 /* Determine the new instruction pointer from the gate descriptor. */
1089 uNewRip = pDesc->Legacy.Gate.u16OffsetLow
1090 | ((uint32_t)pDesc->Legacy.Gate.u16OffsetHigh << 16)
1091 | ((uint64_t)pDesc->Long.Gate.u32OffsetTop << 32);
1092
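/*
 * Note: the gate descriptor stores the target offset in pieces: bits 15:0 in
 * u16OffsetLow, bits 31:16 in u16OffsetHigh and, for 64-bit call gates, bits 63:32
 * in the upper descriptor half (Long.Gate.u32OffsetTop).  They are reassembled here;
 * 16-bit gates get the high bits chopped off again further down.
 */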
1093 /* Perform DPL checks on the gate descriptor. */
1094 if ( pDesc->Legacy.Gate.u2Dpl < IEM_GET_CPL(pVCpu)
1095 || pDesc->Legacy.Gate.u2Dpl < (uSel & X86_SEL_RPL))
1096 {
1097 Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
1098 IEM_GET_CPL(pVCpu), (uSel & X86_SEL_RPL)));
1099 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1100 }
1101
1102 /** @todo does this catch NULL selectors, too? */
1103 if (!pDesc->Legacy.Gen.u1Present)
1104 {
1105 Log(("BranchCallGate Gate not present uSel=%04x -> #NP\n", uSel));
1106 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1107 }
1108
1109 /*
1110 * Fetch the target CS descriptor from the GDT or LDT.
1111 */
1112 uNewCS = pDesc->Legacy.Gate.u16Sel;
1113 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_GP);
1114 if (rcStrict != VINF_SUCCESS)
1115 return rcStrict;
1116
1117 /* Target CS must be a code selector. */
1118 if ( !DescCS.Legacy.Gen.u1DescType
1119 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1120 {
1121 Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1122 uNewCS, uNewRip, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1123 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1124 }
1125
1126 /* Privilege checks on target CS. */
1127 if (enmBranch == IEMBRANCH_JUMP)
1128 {
1129 if (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1130 {
1131 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1132 {
1133 Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1134 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1135 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1136 }
1137 }
1138 else
1139 {
1140 if (DescCS.Legacy.Gen.u2Dpl != IEM_GET_CPL(pVCpu))
1141 {
1142 Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1143 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1144 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1145 }
1146 }
1147 }
1148 else
1149 {
1150 Assert(enmBranch == IEMBRANCH_CALL);
1151 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1152 {
1153 Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
1154 uNewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1155 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1156 }
1157 }
1158
1159 /* Additional long mode checks. */
1160 if (IEM_IS_LONG_MODE(pVCpu))
1161 {
1162 if (!DescCS.Legacy.Gen.u1Long)
1163 {
1164 Log(("BranchCallGate uNewCS %04x -> not a 64-bit code segment.\n", uNewCS));
1165 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1166 }
1167
1168 /* L vs D. */
1169 if ( DescCS.Legacy.Gen.u1Long
1170 && DescCS.Legacy.Gen.u1DefBig)
1171 {
1172 Log(("BranchCallGate uNewCS %04x -> both L and D are set.\n", uNewCS));
1173 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCS);
1174 }
1175 }
1176
1177 if (!DescCS.Legacy.Gate.u1Present)
1178 {
1179 Log(("BranchCallGate target CS is not present. uSel=%04x uNewCS=%04x -> #NP(CS)\n", uSel, uNewCS));
1180 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCS);
1181 }
1182
1183 if (enmBranch == IEMBRANCH_JUMP)
1184 {
1185 /** @todo This is very similar to regular far jumps; merge! */
1186 /* Jumps are fairly simple... */
1187
1188 /* Chop the high bits off if 16-bit gate (Intel says so). */
1189 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1190 uNewRip = (uint16_t)uNewRip;
1191
1192 /* Limit check for non-long segments. */
1193 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1194 if (DescCS.Legacy.Gen.u1Long)
1195 u64Base = 0;
1196 else
1197 {
1198 if (uNewRip > cbLimit)
1199 {
1200 Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
1201 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1202 }
1203 u64Base = X86DESC_BASE(&DescCS.Legacy);
1204 }
1205
1206 /* Canonical address check. */
1207 if (!IEM_IS_CANONICAL(uNewRip))
1208 {
1209 Log(("BranchCallGate jump %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1210 return iemRaiseNotCanonical(pVCpu);
1211 }
1212
1213 /*
1214 * Ok, everything checked out fine. Now set the accessed bit before
1215 * committing the result into CS, CSHID and RIP.
1216 */
1217 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1218 {
1219 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1220 if (rcStrict != VINF_SUCCESS)
1221 return rcStrict;
1222 /** @todo check what VT-x and AMD-V does. */
1223 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1224 }
1225
1226 /* commit */
1227 pVCpu->cpum.GstCtx.rip = uNewRip;
1228 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1229 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu); /** @todo is this right for conforming segs? or in general? */
1230 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1231 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1232 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1233 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1234 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1235 }
1236 else
1237 {
1238 Assert(enmBranch == IEMBRANCH_CALL);
1239 /* Calls are much more complicated. */
1240
1241 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF) && (DescCS.Legacy.Gen.u2Dpl < IEM_GET_CPL(pVCpu)))
1242 {
1243 /* More privilege. This is the fun part. */
1244 Assert(!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)); /* Filtered out above. */
1245
1246 /*
1247 * Determine new SS:rSP from the TSS.
1248 */
1249 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType);
1250
1251 /* Figure out where the new stack pointer is stored in the TSS. */
1252 uint8_t const uNewCSDpl = DescCS.Legacy.Gen.u2Dpl;
1253 uint16_t offNewStack; /* Offset of new stack in TSS. */
1254 uint16_t cbNewStack; /* Number of bytes the stack information takes up in TSS. */
1255 if (!IEM_IS_LONG_MODE(pVCpu))
1256 {
1257 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1258 {
1259 offNewStack = RT_UOFFSETOF(X86TSS32, esp0) + uNewCSDpl * 8;
1260 cbNewStack = RT_SIZEOFMEMB(X86TSS32, esp0) + RT_SIZEOFMEMB(X86TSS32, ss0);
1261 }
1262 else
1263 {
1264 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1265 offNewStack = RT_UOFFSETOF(X86TSS16, sp0) + uNewCSDpl * 4;
1266 cbNewStack = RT_SIZEOFMEMB(X86TSS16, sp0) + RT_SIZEOFMEMB(X86TSS16, ss0);
1267 }
1268 }
1269 else
1270 {
1271 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1272 offNewStack = RT_UOFFSETOF(X86TSS64, rsp0) + uNewCSDpl * RT_SIZEOFMEMB(X86TSS64, rsp0);
1273 cbNewStack = RT_SIZEOFMEMB(X86TSS64, rsp0);
1274 }
1275
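/*
 * Note on the offsets computed above: the per-privilege stack pointers occupy fixed
 * slots in the TSS: ss0:esp0 / ss1:esp1 / ss2:esp2 spaced 8 bytes apart in a 32-bit
 * TSS, ss0:sp0 etc. spaced 4 bytes apart in a 16-bit TSS, and plain rsp0 / rsp1 /
 * rsp2 (8 bytes each, no SS selectors) in a 64-bit TSS; that is what the uNewCSDpl
 * scaling selects.
 */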
1276 /* Check against TSS limit. */
1277 if ((uint16_t)(offNewStack + cbNewStack - 1) > pVCpu->cpum.GstCtx.tr.u32Limit)
1278 {
1279 Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pVCpu->cpum.GstCtx.tr.u32Limit));
1280 return iemRaiseTaskSwitchFaultBySelector(pVCpu, pVCpu->cpum.GstCtx.tr.Sel);
1281 }
1282
1283 uint8_t bUnmapInfo;
1284 RTPTRUNION uPtrTss;
1285 RTGCPTR GCPtrTss = pVCpu->cpum.GstCtx.tr.u64Base + offNewStack;
1286 rcStrict = iemMemMap(pVCpu, &uPtrTss.pv, &bUnmapInfo, cbNewStack, UINT8_MAX, GCPtrTss, IEM_ACCESS_SYS_R, 0);
1287 if (rcStrict != VINF_SUCCESS)
1288 {
1289 Log(("BranchCallGate: TSS mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1290 return rcStrict;
1291 }
1292
1293 RTSEL uNewSS;
1294 if (!IEM_IS_LONG_MODE(pVCpu))
1295 {
1296 if (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY)
1297 {
1298 uNewRsp = uPtrTss.pu32[0];
1299 uNewSS = uPtrTss.pu16[2];
1300 }
1301 else
1302 {
1303 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY);
1304 uNewRsp = uPtrTss.pu16[0];
1305 uNewSS = uPtrTss.pu16[1];
1306 }
1307 }
1308 else
1309 {
1310 Assert(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY);
1311 /* SS will be a NULL selector, but that's valid. */
1312 uNewRsp = uPtrTss.pu64[0];
1313 uNewSS = uNewCSDpl;
1314 }
1315
1316 /* Done with the TSS now. */
1317 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1318 if (rcStrict != VINF_SUCCESS)
1319 {
1320 Log(("BranchCallGate: TSS unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1321 return rcStrict;
1322 }
1323
1324 /* Only used outside of long mode. */
1325 uint8_t const cbWords = pDesc->Legacy.Gate.u5ParmCount;
1326
1327 /* If EFER.LMA is 0, there's extra work to do. */
1328 IEMSELDESC DescSS;
1329 if (!IEM_IS_LONG_MODE(pVCpu))
1330 {
1331 if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
1332 {
1333 Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
1334 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1335 }
1336
1337 /* Grab the new SS descriptor. */
1338 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1339 if (rcStrict != VINF_SUCCESS)
1340 return rcStrict;
1341
1342 /* Ensure that CS.DPL == SS.RPL == SS.DPL. */
1343 if ( (DescCS.Legacy.Gen.u2Dpl != (uNewSS & X86_SEL_RPL))
1344 || (DescCS.Legacy.Gen.u2Dpl != DescSS.Legacy.Gen.u2Dpl))
1345 {
1346 Log(("BranchCallGate call bad RPL/DPL uNewSS=%04x SS DPL=%d CS DPL=%u -> #TS(NewSS)\n",
1347 uNewSS, DescSS.Legacy.Gen.u2Dpl, DescCS.Legacy.Gen.u2Dpl));
1348 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1349 }
1350
1351 /* Ensure new SS is a writable data segment. */
1352 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
1353 {
1354 Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
1355 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uNewSS);
1356 }
1357
1358 if (!DescSS.Legacy.Gen.u1Present)
1359 {
1360 Log(("BranchCallGate New stack not present uSel=%04x -> #SS(NewSS)\n", uNewSS));
1361 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
1362 }
1363 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1364 cbNewStack = (uint16_t)sizeof(uint32_t) * (4 + cbWords);
1365 else
1366 cbNewStack = (uint16_t)sizeof(uint16_t) * (4 + cbWords);
1367 }
1368 else
1369 {
1370 /* Just grab the new (NULL) SS descriptor. */
1371 /** @todo testcase: Check whether the zero GDT entry is actually loaded here
1372 * like we do... */
1373 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_SS);
1374 if (rcStrict != VINF_SUCCESS)
1375 return rcStrict;
1376
1377 cbNewStack = sizeof(uint64_t) * 4;
1378 }
1379
1380 /** @todo According to Intel, the new stack is checked for enough space first,
1381 * then switched. According to AMD, the stack is switched first and
1382 * then pushes might fault!
1383 * NB: OS/2 Warp 3/4 actively relies on the fact that a possible
1384 * incoming stack \#PF happens before the actual stack switch. AMD is
1385 * either lying or implicitly assumes that new state is committed
1386 * only if and when an instruction doesn't fault.
1387 */
1388
1389 /** @todo According to AMD, CS is loaded first, then SS.
1390 * According to Intel, it's the other way around!?
1391 */
1392
1393 /** @todo Intel and AMD disagree on when exactly the CPL changes! */
1394
1395 /* Set the accessed bit before committing new SS. */
1396 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1397 {
1398 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1399 if (rcStrict != VINF_SUCCESS)
1400 return rcStrict;
1401 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1402 }
1403
1404 /* Remember the old SS:rSP and their linear address. */
1405 RTSEL const uOldSS = pVCpu->cpum.GstCtx.ss.Sel;
1406 uint64_t const uOldRsp = pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig ? pVCpu->cpum.GstCtx.rsp : pVCpu->cpum.GstCtx.sp;
1407
1408 RTGCPTR const GCPtrParmWds = pVCpu->cpum.GstCtx.ss.u64Base + uOldRsp;
1409
1410 /* HACK ALERT! Probe if the write to the new stack will succeed. May #SS(NewSS)
1411 or #PF; the former is not implemented in this workaround. */
1412 /** @todo Proper fix callgate target stack exceptions. */
1413 /** @todo testcase: Cover callgates with partially or fully inaccessible
1414 * target stacks. */
1415 void *pvNewFrame;
1416 RTGCPTR GCPtrNewStack = X86DESC_BASE(&DescSS.Legacy) + uNewRsp - cbNewStack;
1417 rcStrict = iemMemMap(pVCpu, &pvNewFrame, &bUnmapInfo, cbNewStack, UINT8_MAX, GCPtrNewStack, IEM_ACCESS_SYS_RW, 0);
1418 if (rcStrict != VINF_SUCCESS)
1419 {
1420 Log(("BranchCallGate: Incoming stack (%04x:%08RX64) not accessible, rc=%Rrc\n", uNewSS, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
1421 return rcStrict;
1422 }
1423 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1424 if (rcStrict != VINF_SUCCESS)
1425 {
1426 Log(("BranchCallGate: New stack probe unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1427 return rcStrict;
1428 }
1429
1430 /* Commit new SS:rSP. */
1431 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1432 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1433 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1434 pVCpu->cpum.GstCtx.ss.u32Limit = X86DESC_LIMIT_G(&DescSS.Legacy);
1435 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1436 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1437 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1438 IEM_SET_CPL(pVCpu, uNewCSDpl); /** @todo Are the parameter words accessed using the new CPL or the old CPL? */
1439 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1440 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1441
1442 /* At this point the stack access must not fail because new state was already committed. */
1443 /** @todo this can still fail due to SS.LIMIT not being checked. */
1444 uint8_t bUnmapInfoRet;
1445 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbNewStack,
1446 IEM_IS_LONG_MODE(pVCpu) ? 7
1447 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 1,
1448 &uPtrRet.pv, &bUnmapInfoRet, &uNewRsp);
1449 AssertMsgReturn(rcStrict == VINF_SUCCESS, ("BranchCallGate: New stack mapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)),
1450 VERR_INTERNAL_ERROR_5);
1451
1452 if (!IEM_IS_LONG_MODE(pVCpu))
1453 {
1454 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1455 {
1456 if (cbWords)
1457 {
1458 /* Map the relevant chunk of the old stack. */
1459 RTPTRUNION uPtrParmWds;
1460 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, &bUnmapInfo, cbWords * 4, UINT8_MAX, GCPtrParmWds,
1461 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1462 if (rcStrict != VINF_SUCCESS)
1463 {
1464 Log(("BranchCallGate: Old stack mapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1465 return rcStrict;
1466 }
1467
1468 /* Copy the parameter (d)words. */
1469 for (int i = 0; i < cbWords; ++i)
1470 uPtrRet.pu32[2 + i] = uPtrParmWds.pu32[i];
1471
1472 /* Unmap the old stack. */
1473 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1474 if (rcStrict != VINF_SUCCESS)
1475 {
1476 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1477 return rcStrict;
1478 }
1479 }
1480
1481 /* Push the old CS:rIP. */
1482 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1483 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1484
1485 /* Push the old SS:rSP. */
1486 uPtrRet.pu32[2 + cbWords + 0] = uOldRsp;
1487 uPtrRet.pu32[2 + cbWords + 1] = uOldSS;
1488 }
1489 else
1490 {
1491 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1492
1493 if (cbWords)
1494 {
1495 /* Map the relevant chunk of the old stack. */
1496 RTPTRUNION uPtrParmWds;
1497 rcStrict = iemMemMap(pVCpu, &uPtrParmWds.pv, &bUnmapInfo, cbWords * 2, UINT8_MAX, GCPtrParmWds,
1498 IEM_ACCESS_DATA_R, 0 /** @todo Can uNewCSDpl == 3? Then we need alignment mask here! */);
1499 if (rcStrict != VINF_SUCCESS)
1500 {
1501 Log(("BranchCallGate: Old stack mapping (16-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1502 return rcStrict;
1503 }
1504
1505 /* Copy the parameter words. */
1506 for (int i = 0; i < cbWords; ++i)
1507 uPtrRet.pu16[2 + i] = uPtrParmWds.pu16[i];
1508
1509 /* Unmap the old stack. */
1510 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
1511 if (rcStrict != VINF_SUCCESS)
1512 {
1513 Log(("BranchCallGate: Old stack unmapping (32-bit) failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1514 return rcStrict;
1515 }
1516 }
1517
1518 /* Push the old CS:rIP. */
1519 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1520 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1521
1522 /* Push the old SS:rSP. */
1523 uPtrRet.pu16[2 + cbWords + 0] = uOldRsp;
1524 uPtrRet.pu16[2 + cbWords + 1] = uOldSS;
1525 }
1526 }
1527 else
1528 {
1529 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1530
1531 /* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
1532 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1533 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1534 uPtrRet.pu64[2] = uOldRsp;
1535 uPtrRet.pu64[3] = uOldSS; /** @todo Testcase: What is written to the high words when pushing SS? */
1536 }
1537
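/*
 * Note: after the pushes above, the new (inner) stack frame looks like this for a
 * 32-bit gate, from the lowest address (the new RSP) upwards:
 *
 *      [0]               return EIP
 *      [1]               return CS
 *      [2 .. 2+cbWords)  copied parameter dwords
 *      [2+cbWords]       old ESP
 *      [2+cbWords+1]     old SS
 *
 * The 16-bit gate layout is the same with words, and 64-bit gates push only RIP, CS,
 * old RSP and old SS (no parameter copying).
 */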
1538 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfoRet, uNewRsp);
1539 if (rcStrict != VINF_SUCCESS)
1540 {
1541 Log(("BranchCallGate: New stack unmapping failed (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
1542 return rcStrict;
1543 }
1544
1545 /* Chop the high bits off if 16-bit gate (Intel says so). */
1546 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1547 uNewRip = (uint16_t)uNewRip;
1548
1549 /* Limit / canonical check. */
1550 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1551 if (!IEM_IS_LONG_MODE(pVCpu))
1552 {
1553 if (uNewRip > cbLimit)
1554 {
1555 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1556 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1557 }
1558 u64Base = X86DESC_BASE(&DescCS.Legacy);
1559 }
1560 else
1561 {
1562 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1563 if (!IEM_IS_CANONICAL(uNewRip))
1564 {
1565 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1566 return iemRaiseNotCanonical(pVCpu);
1567 }
1568 u64Base = 0;
1569 }
1570
1571 /*
1572 * Now set the accessed bit before
1573 * writing the return address to the stack and committing the result into
1574 * CS, CSHID and RIP.
1575 */
1576 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1577 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1578 {
1579 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1580 if (rcStrict != VINF_SUCCESS)
1581 return rcStrict;
1582 /** @todo check what VT-x and AMD-V does. */
1583 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1584 }
1585
1586 /* Commit new CS:rIP. */
1587 pVCpu->cpum.GstCtx.rip = uNewRip;
1588 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1589 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
1590 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1591 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1592 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1593 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1594 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1595 }
1596 else
1597 {
1598 /* Same privilege. */
1599 /** @todo This is very similar to regular far calls; merge! */
1600
1601 /* Check stack first - may #SS(0). */
1602 /** @todo check how gate size affects pushing of CS! Does callf 16:32 in
1603 * 16-bit code cause a two or four byte CS to be pushed? */
1604 uint8_t bUnmapInfoRet;
1605 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
1606 IEM_IS_LONG_MODE(pVCpu) ? 8+8
1607 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 4+4 : 2+2,
1608 IEM_IS_LONG_MODE(pVCpu) ? 7
1609 : pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE ? 3 : 2,
1610 &uPtrRet.pv, &bUnmapInfoRet, &uNewRsp);
1611 if (rcStrict != VINF_SUCCESS)
1612 return rcStrict;
1613
1614 /* Chop the high bits off if 16-bit gate (Intel says so). */
1615 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE)
1616 uNewRip = (uint16_t)uNewRip;
1617
1618 /* Limit / canonical check. */
1619 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1620 if (!IEM_IS_LONG_MODE(pVCpu))
1621 {
1622 if (uNewRip > cbLimit)
1623 {
1624 Log(("BranchCallGate %04x:%08RX64 -> out of bounds (%#x)\n", uNewCS, uNewRip, cbLimit));
1625 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, 0);
1626 }
1627 u64Base = X86DESC_BASE(&DescCS.Legacy);
1628 }
1629 else
1630 {
1631 if (!IEM_IS_CANONICAL(uNewRip))
1632 {
1633 Log(("BranchCallGate call %04x:%016RX64 - not canonical -> #GP\n", uNewCS, uNewRip));
1634 return iemRaiseNotCanonical(pVCpu);
1635 }
1636 u64Base = 0;
1637 }
1638
1639 /*
1640 * Now set the accessed bit before
1641 * writing the return address to the stack and committing the result into
1642 * CS, CSHID and RIP.
1643 */
1644 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1645 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1646 {
1647 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1648 if (rcStrict != VINF_SUCCESS)
1649 return rcStrict;
1650 /** @todo check what VT-x and AMD-V does. */
1651 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1652 }
1653
1654 /* stack */
1655 if (!IEM_IS_LONG_MODE(pVCpu))
1656 {
1657 if (pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_CALL_GATE)
1658 {
1659 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1660 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
1661 }
1662 else
1663 {
1664 Assert(pDesc->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_CALL_GATE);
1665 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1666 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1667 }
1668 }
1669 else
1670 {
1671 Assert(pDesc->Legacy.Gate.u4Type == AMD64_SEL_TYPE_SYS_CALL_GATE);
1672 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
1673 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
1674 }
1675
1676 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfoRet, uNewRsp);
1677 if (rcStrict != VINF_SUCCESS)
1678 return rcStrict;
1679
1680 /* commit */
1681 pVCpu->cpum.GstCtx.rip = uNewRip;
1682 pVCpu->cpum.GstCtx.cs.Sel = uNewCS & X86_SEL_MASK_OFF_RPL;
1683 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
1684 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1685 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1686 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1687 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1688 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1689 }
1690 }
1691 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
1692
1693 iemRecalcExecModeAndCplFlags(pVCpu);
1694
1695/** @todo single stepping */
1696
1697 /* Flush the prefetch buffer. */
1698 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
1699 return VINF_SUCCESS;
1700#endif /* IEM_IMPLEMENTS_CALLGATE */
1701}
1702
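/*
 * Illustrative sketch (not part of IEM): the stack frames built by the
 * call-gate paths above, summarized as sizes.  For a more-privileged 286/386
 * gate the new stack receives CS:IP, the copied parameter (d)words and then
 * the old SS:SP; a 64-bit gate copies no parameters at all.  The helper name
 * and parameters below are assumptions for illustration only.
 */
#if 0 /* Illustrative sketch only; not compiled. */
# include <stddef.h>
# include <stdint.h>

static size_t exampleCallGateFrameSize(unsigned cWords, int f386Gate, int f64BitGate)
{
    if (f64BitGate)
        return 4 * sizeof(uint64_t);                    /* RIP, CS, RSP, SS - no parameters. */
    size_t const cbSlot = f386Gate ? sizeof(uint32_t) : sizeof(uint16_t);
    return (2 /* (E)IP, CS */ + cWords + 2 /* (E)SP, SS */) * cbSlot;
}
#endif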
1703
1704/**
1705 * Implements far jumps and calls thru system selectors.
1706 *
1707 * @returns VBox strict status code.
1708 * @param pVCpu The cross context virtual CPU structure of the
1709 * calling thread.
1710 * @param cbInstr The current instruction length.
1711 * @param uSel The selector.
1712 * @param enmBranch The kind of branching we're performing.
1713 * @param enmEffOpSize The effective operand size.
1714 * @param pDesc The descriptor corresponding to @a uSel.
1715 */
1716static VBOXSTRICTRC iemCImpl_BranchSysSel(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uSel, IEMBRANCH enmBranch,
1717 IEMMODE enmEffOpSize, PIEMSELDESC pDesc)
1718{
1719 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
1720 Assert((uSel & X86_SEL_MASK_OFF_RPL));
1721 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1722
1723 if (IEM_IS_LONG_MODE(pVCpu))
1724 switch (pDesc->Legacy.Gen.u4Type)
1725 {
1726 case AMD64_SEL_TYPE_SYS_CALL_GATE:
1727 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1728
1729 default:
1730 case AMD64_SEL_TYPE_SYS_LDT:
1731 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
1732 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
1733 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
1734 case AMD64_SEL_TYPE_SYS_INT_GATE:
1735 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1736 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1737 }
1738
1739 switch (pDesc->Legacy.Gen.u4Type)
1740 {
1741 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1742 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1743 return iemCImpl_BranchCallGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1744
1745 case X86_SEL_TYPE_SYS_TASK_GATE:
1746 return iemCImpl_BranchTaskGate(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1747
1748 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1749 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1750 return iemCImpl_BranchTaskSegment(pVCpu, cbInstr, uSel, enmBranch, enmEffOpSize, pDesc);
1751
1752 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1753 Log(("branch %04x -> busy 286 TSS\n", uSel));
1754 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1755
1756 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1757 Log(("branch %04x -> busy 386 TSS\n", uSel));
1758 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1759
1760 default:
1761 case X86_SEL_TYPE_SYS_LDT:
1762 case X86_SEL_TYPE_SYS_286_INT_GATE:
1763 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1764 case X86_SEL_TYPE_SYS_386_INT_GATE:
1765 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1766 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
1767 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1768 }
1769}
1770
1771
1772/**
1773 * Implements far jumps.
1774 *
1775 * @param uSel The selector.
1776 * @param offSeg The segment offset.
1777 * @param enmEffOpSize The effective operand size.
1778 */
1779IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1780{
1781 NOREF(cbInstr);
1782 Assert(offSeg <= UINT32_MAX || (!IEM_IS_GUEST_CPU_AMD(pVCpu) && IEM_IS_64BIT_CODE(pVCpu)));
1783
1784 /*
1785 * Real mode and V8086 mode are easy. The only snag seems to be that
1786 * CS.limit doesn't change and the limit check is done against the current
1787 * limit.
1788 */
1789 /** @todo Robert Collins claims (The Segment Descriptor Cache, DDJ August
1790 * 1998) that up to and including the Intel 486, far control
1791 * transfers in real mode set default CS attributes (0x93) and also
1792 * set a 64K segment limit. Starting with the Pentium, the
1793 * attributes and limit are left alone but the access rights are
1794 * ignored. We only implement the Pentium+ behavior.
1795 * */
1796 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1797 {
1798 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1799 if (offSeg > pVCpu->cpum.GstCtx.cs.u32Limit)
1800 {
1801 Log(("iemCImpl_FarJmp: 16-bit limit\n"));
1802 return iemRaiseGeneralProtectionFault0(pVCpu);
1803 }
1804
1805 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
1806 pVCpu->cpum.GstCtx.rip = offSeg;
1807 else
1808 pVCpu->cpum.GstCtx.rip = offSeg & UINT16_MAX;
1809 pVCpu->cpum.GstCtx.cs.Sel = uSel;
1810 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
1811 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1812 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
1813
1814 /* Update the FLAT 32-bit mode flag, if we're in 32-bit unreal mode (unlikely): */
1815 if (RT_LIKELY(!IEM_IS_32BIT_CODE(pVCpu)))
1816 { /* likely */ }
1817 else if (uSel != 0)
1818 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
1819 else
1820 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
1821 | iemCalc32BitFlatIndicator(pVCpu);
1822
1823 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
1824 }
1825
1826 /*
1827 * Protected mode. Need to parse the specified descriptor...
1828 */
1829 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1830 {
1831 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1832 return iemRaiseGeneralProtectionFault0(pVCpu);
1833 }
1834
1835 /* Fetch the descriptor. */
1836 IEMSELDESC Desc;
1837 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
1838 if (rcStrict != VINF_SUCCESS)
1839 return rcStrict;
1840
1841 /* Is it there? */
1842 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
1843 {
1844 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1845 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
1846 }
1847
1848 /*
1849 * Deal with it according to its type. We do the standard code selectors
1850 * here and dispatch the system selectors to worker functions.
1851 */
1852 if (!Desc.Legacy.Gen.u1DescType)
1853 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
1854
1855 /* Only code segments. */
1856 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1857 {
1858 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1859 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1860 }
1861
1862 /* L vs D. */
1863 if ( Desc.Legacy.Gen.u1Long
1864 && Desc.Legacy.Gen.u1DefBig
1865 && IEM_IS_LONG_MODE(pVCpu))
1866 {
1867 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1868 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1869 }
1870
1871 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1872 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1873 {
1874 if (IEM_GET_CPL(pVCpu) < Desc.Legacy.Gen.u2Dpl)
1875 {
1876 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1877 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1878 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1879 }
1880 }
1881 else
1882 {
1883 if (IEM_GET_CPL(pVCpu) != Desc.Legacy.Gen.u2Dpl)
1884 {
1885 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1886 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1887 }
1888 if ((uSel & X86_SEL_RPL) > IEM_GET_CPL(pVCpu))
1889 {
1890 Log(("jmpf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu)));
1891 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1892 }
1893 }
1894
1895 /* Chop the high bits if 16-bit (Intel says so). */
1896 if (enmEffOpSize == IEMMODE_16BIT)
1897 offSeg &= UINT16_MAX;
1898
1899 /* Limit check and get the base. */
1900 uint64_t u64Base;
1901 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1902 if ( !Desc.Legacy.Gen.u1Long
1903 || !IEM_IS_LONG_MODE(pVCpu))
1904 {
1905 if (RT_LIKELY(offSeg <= cbLimit))
1906 u64Base = X86DESC_BASE(&Desc.Legacy);
1907 else
1908 {
1909 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1910 /** @todo Intel says this is \#GP(0)! */
1911 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
1912 }
1913 }
1914 else
1915 u64Base = 0;
1916
1917 /*
1918 * Ok, everything checked out fine. Now set the accessed bit before
1919 * committing the result into CS, CSHID and RIP.
1920 */
1921 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1922 {
1923 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
1924 if (rcStrict != VINF_SUCCESS)
1925 return rcStrict;
1926 /** @todo check what VT-x and AMD-V does. */
1927 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1928 }
1929
1930 /* commit */
1931 pVCpu->cpum.GstCtx.rip = offSeg;
1932 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1933 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu); /** @todo is this right for conforming segs? or in general? */
1934 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
1935 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1936 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1937 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1938 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1939
1940 /** @todo check if the hidden bits are loaded correctly for 64-bit
1941 * mode. */
1942
1943 iemRecalcExecModeAndCplFlags(pVCpu);
1944
1945 /* Flush the prefetch buffer. */
1946 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
1947
1948 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
1949}
1950
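/*
 * Illustrative sketch (not part of IEM): a tiny model of the real/V8086-mode
 * far jump handled at the top of iemCImpl_FarJmp above - only CS.Sel, the CS
 * base (selector << 4) and (R)IP change, and the offset is checked against
 * the *current* cached CS limit.  Function and parameter names are
 * assumptions for illustration only.
 */
#if 0 /* Illustrative sketch only; not compiled. */
# include <stdint.h>

/* Returns the linear address the jump lands on, or UINT32_MAX on a limit
   violation (where the code above raises #GP(0) instead). */
static uint32_t exampleRealModeFarJmpTarget(uint16_t uSel, uint32_t offSeg, uint32_t cbCurCsLimit)
{
    if (offSeg > cbCurCsLimit)
        return UINT32_MAX;
    return ((uint32_t)uSel << 4) + offSeg;
}
#endif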
1951
1952/**
1953 * Implements far calls.
1954 *
1955 * This is very similar to iemCImpl_FarJmp.
1956 *
1957 * @param uSel The selector.
1958 * @param offSeg The segment offset.
1959 * @param enmEffOpSize The operand size (in case we need it).
1960 */
1961IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1962{
1963 VBOXSTRICTRC rcStrict;
1964 uint64_t uNewRsp;
1965 RTPTRUNION uPtrRet;
1966 uint8_t bUnmapInfo;
1967
1968 /*
1969 * Real mode and V8086 mode are easy. The only snag seems to be that
1970 * CS.limit doesn't change and the limit check is done against the current
1971 * limit.
1972 */
1973 /** @todo See comment for similar code in iemCImpl_FarJmp */
1974 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
1975 {
1976 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1977
1978 /* Check stack first - may #SS(0). */
1979 rcStrict = iemMemStackPushBeginSpecial(pVCpu, enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1980 enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
1981 &uPtrRet.pv, &bUnmapInfo, &uNewRsp);
1982 if (rcStrict != VINF_SUCCESS)
1983 return rcStrict;
1984
1985 /* Check the target address range. */
1986/** @todo this must be wrong! Write unreal mode tests! */
1987 if (offSeg > UINT32_MAX)
1988 return iemRaiseGeneralProtectionFault0(pVCpu);
1989
1990 /* Everything is fine, push the return address. */
1991 if (enmEffOpSize == IEMMODE_16BIT)
1992 {
1993 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
1994 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1995 }
1996 else
1997 {
1998 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
1999 uPtrRet.pu16[2] = pVCpu->cpum.GstCtx.cs.Sel;
2000 }
2001 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2002 if (rcStrict != VINF_SUCCESS)
2003 return rcStrict;
2004
2005 /* Branch. */
2006 pVCpu->cpum.GstCtx.rip = offSeg;
2007 pVCpu->cpum.GstCtx.cs.Sel = uSel;
2008 pVCpu->cpum.GstCtx.cs.ValidSel = uSel;
2009 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2010 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uSel << 4;
2011
2012 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2013 }
2014
2015 /*
2016 * Protected mode. Need to parse the specified descriptor...
2017 */
2018 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2019 {
2020 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
2021 return iemRaiseGeneralProtectionFault0(pVCpu);
2022 }
2023
2024 /* Fetch the descriptor. */
2025 IEMSELDESC Desc;
2026 rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
2027 if (rcStrict != VINF_SUCCESS)
2028 return rcStrict;
2029
2030 /*
2031 * Deal with it according to its type. We do the standard code selectors
2032 * here and dispatch the system selectors to worker functions.
2033 */
2034 if (!Desc.Legacy.Gen.u1DescType)
2035 return iemCImpl_BranchSysSel(pVCpu, cbInstr, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
2036
2037 /* Only code segments. */
2038 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2039 {
2040 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
2041 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2042 }
2043
2044 /* L vs D. */
2045 if ( Desc.Legacy.Gen.u1Long
2046 && Desc.Legacy.Gen.u1DefBig
2047 && IEM_IS_LONG_MODE(pVCpu))
2048 {
2049 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
2050 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2051 }
2052
2053 /* DPL/RPL/CPL check, where conforming segments make a difference. */
2054 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2055 {
2056 if (IEM_GET_CPL(pVCpu) < Desc.Legacy.Gen.u2Dpl)
2057 {
2058 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
2059 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2060 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2061 }
2062 }
2063 else
2064 {
2065 if (IEM_GET_CPL(pVCpu) != Desc.Legacy.Gen.u2Dpl)
2066 {
2067 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2068 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2069 }
2070 if ((uSel & X86_SEL_RPL) > IEM_GET_CPL(pVCpu))
2071 {
2072 Log(("callf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu)));
2073 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2074 }
2075 }
2076
2077 /* Is it there? */
2078 if (!Desc.Legacy.Gen.u1Present)
2079 {
2080 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
2081 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
2082 }
2083
2084 /* Check stack first - may #SS(0). */
2085 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
2086 * 16-bit code cause a two or four byte CS to be pushed? */
2087 rcStrict = iemMemStackPushBeginSpecial(pVCpu,
2088 enmEffOpSize == IEMMODE_64BIT ? 8+8 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
2089 enmEffOpSize == IEMMODE_64BIT ? 7 : enmEffOpSize == IEMMODE_32BIT ? 3 : 1,
2090 &uPtrRet.pv, &bUnmapInfo, &uNewRsp);
2091 if (rcStrict != VINF_SUCCESS)
2092 return rcStrict;
2093
2094 /* Chop the high bits if 16-bit (Intel says so). */
2095 if (enmEffOpSize == IEMMODE_16BIT)
2096 offSeg &= UINT16_MAX;
2097
2098 /* Limit / canonical check. */
2099 uint64_t u64Base;
2100 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2101 if ( !Desc.Legacy.Gen.u1Long
2102 || !IEM_IS_LONG_MODE(pVCpu))
2103 {
2104 if (RT_LIKELY(offSeg <= cbLimit))
2105 u64Base = X86DESC_BASE(&Desc.Legacy);
2106 else
2107 {
2108 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
2109 /** @todo Intel says this is \#GP(0)! */
2110 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
2111 }
2112 }
2113 else if (IEM_IS_CANONICAL(offSeg))
2114 u64Base = 0;
2115 else
2116 {
2117 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
2118 return iemRaiseNotCanonical(pVCpu);
2119 }
2120
2121 /*
2122 * Now set the accessed bit before
2123 * writing the return address to the stack and committing the result into
2124 * CS, CSHID and RIP.
2125 */
2126 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2127 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2128 {
2129 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2130 if (rcStrict != VINF_SUCCESS)
2131 return rcStrict;
2132 /** @todo check what VT-x and AMD-V does. */
2133 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2134 }
2135
2136 /* stack */
2137 if (enmEffOpSize == IEMMODE_16BIT)
2138 {
2139 uPtrRet.pu16[0] = pVCpu->cpum.GstCtx.ip + cbInstr;
2140 uPtrRet.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
2141 }
2142 else if (enmEffOpSize == IEMMODE_32BIT)
2143 {
2144 uPtrRet.pu32[0] = pVCpu->cpum.GstCtx.eip + cbInstr;
2145 uPtrRet.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
2146 }
2147 else
2148 {
2149 uPtrRet.pu64[0] = pVCpu->cpum.GstCtx.rip + cbInstr;
2150 uPtrRet.pu64[1] = pVCpu->cpum.GstCtx.cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
2151 }
2152 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
2153 if (rcStrict != VINF_SUCCESS)
2154 return rcStrict;
2155
2156 /* commit */
2157 pVCpu->cpum.GstCtx.rip = offSeg;
2158 pVCpu->cpum.GstCtx.cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
2159 pVCpu->cpum.GstCtx.cs.Sel |= IEM_GET_CPL(pVCpu);
2160 pVCpu->cpum.GstCtx.cs.ValidSel = pVCpu->cpum.GstCtx.cs.Sel;
2161 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2162 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2163 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2164 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2165
2166 /** @todo check if the hidden bits are loaded correctly for 64-bit
2167 * mode. */
2168
2169 iemRecalcExecModeAndCplFlags(pVCpu);
2170
2171 /* Flush the prefetch buffer. */
2172 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
2173
2174 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2175}
2176
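/*
 * Illustrative sketch (not part of IEM): the far call above pushes a CS:IP
 * return frame whose slot width follows the effective operand size - 2+2,
 * 4+4 or 8+8 bytes - with the selector always occupying only the low 16 bits
 * of its slot.  The enum and helper below merely restate that mapping and are
 * assumptions for illustration only.
 */
#if 0 /* Illustrative sketch only; not compiled. */
# include <stdint.h>

typedef enum EXOPSIZE { EXOPSIZE_16, EXOPSIZE_32, EXOPSIZE_64 } EXOPSIZE;

static uint8_t exampleFarCallRetFrameSize(EXOPSIZE enmOpSize)
{
    switch (enmOpSize)
    {
        case EXOPSIZE_16:   return 2 + 2;   /* IP  + CS */
        case EXOPSIZE_32:   return 4 + 4;   /* EIP + CS */
        default:            return 8 + 8;   /* RIP + CS */
    }
}
#endif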
2177
2178/**
2179 * Implements retf.
2180 *
2181 * @param enmEffOpSize The effective operand size.
2182 * @param cbPop The number of argument bytes to pop from the
2183 * stack.
2184 */
2185IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
2186{
2187 NOREF(cbInstr);
2188
2189 /*
2190 * Read the stack values first.
2191 */
2192 RTUINT64U NewRsp;
2193 uint8_t bUnmapInfo;
2194 RTCPTRUNION uPtrFrame;
2195 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
2196 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
2197 VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbRetPtr,
2198 enmEffOpSize == IEMMODE_16BIT ? 1 : enmEffOpSize == IEMMODE_32BIT ? 3 : 7,
2199 &uPtrFrame.pv, &bUnmapInfo, &NewRsp.u);
2200 if (rcStrict != VINF_SUCCESS)
2201 return rcStrict;
2202
2203 uint64_t uNewRip;
2204 uint16_t uNewCs;
2205 if (enmEffOpSize == IEMMODE_16BIT)
2206 {
2207 uNewRip = uPtrFrame.pu16[0];
2208 uNewCs = uPtrFrame.pu16[1];
2209 }
2210 else if (enmEffOpSize == IEMMODE_32BIT)
2211 {
2212 uNewRip = uPtrFrame.pu32[0];
2213 uNewCs = uPtrFrame.pu16[2];
2214 }
2215 else
2216 {
2217 uNewRip = uPtrFrame.pu64[0];
2218 uNewCs = uPtrFrame.pu16[4];
2219 }
2220
2221 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
2222 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2223 { /* extremely likely */ }
2224 else
2225 return rcStrict;
2226
2227 /*
2228 * Real mode and V8086 mode are easy.
2229 */
2230 /** @todo See comment for similar code in iemCImpl_FarJmp */
2231 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
2232 {
2233 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2234 /** @todo check how this is supposed to work if sp=0xfffe. */
2235
2236 /* Check the limit of the new EIP. */
2237 /** @todo Intel pseudo code only does the limit check for 16-bit
2238 * operands; AMD does not make any distinction. What is right? */
2239 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
2240 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2241
2242 /* commit the operation. */
2243 if (cbPop)
2244 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2245 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2246 pVCpu->cpum.GstCtx.rip = uNewRip;
2247 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2248 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2249 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2250 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2251 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2252 }
2253
2254 /*
2255 * Protected mode is complicated, of course.
2256 */
2257 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2258 {
2259 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
2260 return iemRaiseGeneralProtectionFault0(pVCpu);
2261 }
2262
2263 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
2264
2265 /* Fetch the descriptor. */
2266 IEMSELDESC DescCs;
2267 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
2268 if (rcStrict != VINF_SUCCESS)
2269 return rcStrict;
2270
2271 /* Can only return to a code selector. */
2272 if ( !DescCs.Legacy.Gen.u1DescType
2273 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
2274 {
2275 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
2276 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
2277 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2278 }
2279
2280 /* L vs D. */
2281 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
2282 && DescCs.Legacy.Gen.u1DefBig
2283 && IEM_IS_LONG_MODE(pVCpu))
2284 {
2285 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
2286 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2287 }
2288
2289 /* DPL/RPL/CPL checks. */
2290 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
2291 {
2292 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, IEM_GET_CPL(pVCpu)));
2293 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2294 }
2295
2296 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2297 {
2298 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
2299 {
2300 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
2301 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2302 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2303 }
2304 }
2305 else
2306 {
2307 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
2308 {
2309 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
2310 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
2311 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2312 }
2313 }
2314
2315 /* Is it there? */
2316 if (!DescCs.Legacy.Gen.u1Present)
2317 {
2318 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
2319 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2320 }
2321
2322 /*
2323 * Return to outer privilege? (We'll typically have entered via a call gate.)
2324 */
2325 if ((uNewCs & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
2326 {
2327 /* Read the outer stack pointer stored *after* the parameters. */
2328 rcStrict = iemMemStackPopContinueSpecial(pVCpu, cbPop /*off*/, cbRetPtr, &uPtrFrame.pv, &bUnmapInfo, NewRsp.u);
2329 if (rcStrict != VINF_SUCCESS)
2330 return rcStrict;
2331
2332 uint16_t uNewOuterSs;
2333 RTUINT64U NewOuterRsp;
2334 if (enmEffOpSize == IEMMODE_16BIT)
2335 {
2336 NewOuterRsp.u = uPtrFrame.pu16[0];
2337 uNewOuterSs = uPtrFrame.pu16[1];
2338 }
2339 else if (enmEffOpSize == IEMMODE_32BIT)
2340 {
2341 NewOuterRsp.u = uPtrFrame.pu32[0];
2342 uNewOuterSs = uPtrFrame.pu16[2];
2343 }
2344 else
2345 {
2346 NewOuterRsp.u = uPtrFrame.pu64[0];
2347 uNewOuterSs = uPtrFrame.pu16[4];
2348 }
2349 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
2350 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2351 { /* extremely likely */ }
2352 else
2353 return rcStrict;
2354
2355 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
2356 and read the selector. */
2357 IEMSELDESC DescSs;
2358 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
2359 {
2360 if ( !DescCs.Legacy.Gen.u1Long
2361 || (uNewOuterSs & X86_SEL_RPL) == 3)
2362 {
2363 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
2364 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2365 return iemRaiseGeneralProtectionFault0(pVCpu);
2366 }
2367 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
2368 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
2369 }
2370 else
2371 {
2372 /* Fetch the descriptor for the new stack segment. */
2373 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSs, uNewOuterSs, X86_XCPT_GP);
2374 if (rcStrict != VINF_SUCCESS)
2375 return rcStrict;
2376 }
2377
2378 /* Check that RPL of stack and code selectors match. */
2379 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
2380 {
2381 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2382 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2383 }
2384
2385 /* Must be a writable data segment. */
2386 if ( !DescSs.Legacy.Gen.u1DescType
2387 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2388 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2389 {
2390 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
2391 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
2392 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2393 }
2394
2395 /* L vs D. (Not mentioned by Intel.) */
2396 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
2397 && DescSs.Legacy.Gen.u1DefBig
2398 && IEM_IS_LONG_MODE(pVCpu))
2399 {
2400 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
2401 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2402 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2403 }
2404
2405 /* DPL/RPL/CPL checks. */
2406 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2407 {
2408 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
2409 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
2410 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewOuterSs);
2411 }
2412
2413 /* Is it there? */
2414 if (!DescSs.Legacy.Gen.u1Present)
2415 {
2416 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2417 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
2418 }
2419
2420 /* Calc SS limit. */
2421 uint64_t u64BaseSs;
2422 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
2423
2424 /* Is RIP canonical or within CS.limit? */
2425 uint64_t u64BaseCs;
2426 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2427
2428 /** @todo Testcase: Is this correct? */
2429 if ( DescCs.Legacy.Gen.u1Long
2430 && IEM_IS_LONG_MODE(pVCpu) )
2431 {
2432 if (!IEM_IS_CANONICAL(uNewRip))
2433 {
2434 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u));
2435 return iemRaiseNotCanonical(pVCpu);
2436 }
2437 u64BaseCs = 0;
2438 u64BaseSs = 0;
2439 }
2440 else
2441 {
2442 if (uNewRip > cbLimitCs)
2443 {
2444 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
2445 uNewCs, uNewRip, uNewOuterSs, NewOuterRsp.u, cbLimitCs));
2446 /** @todo Intel says this is \#GP(0)! */
2447 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2448 }
2449 u64BaseCs = X86DESC_BASE(&DescCs.Legacy);
2450 u64BaseSs = X86DESC_BASE(&DescSs.Legacy);
2451 }
2452
2453 /*
2454 * Now set the accessed bit before
2455 * writing the return address to the stack and committing the result into
2456 * CS, CSHID and RIP.
2457 */
2458 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
2459 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2460 {
2461 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2462 if (rcStrict != VINF_SUCCESS)
2463 return rcStrict;
2464 /** @todo check what VT-x and AMD-V does. */
2465 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2466 }
2467 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
2468 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2469 {
2470 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewOuterSs);
2471 if (rcStrict != VINF_SUCCESS)
2472 return rcStrict;
2473 /** @todo check what VT-x and AMD-V does. */
2474 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2475 }
2476
2477 /* commit */
2478 if (enmEffOpSize == IEMMODE_16BIT)
2479 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2480 else
2481 pVCpu->cpum.GstCtx.rip = uNewRip;
2482 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2483 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2484 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2485 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2486 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2487 pVCpu->cpum.GstCtx.cs.u64Base = u64BaseCs;
2488 pVCpu->cpum.GstCtx.ss.Sel = uNewOuterSs;
2489 pVCpu->cpum.GstCtx.ss.ValidSel = uNewOuterSs;
2490 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2491 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
2492 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
2493 pVCpu->cpum.GstCtx.ss.u64Base = u64BaseSs;
2494
2495 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
2496 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
2497 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
2498 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
2499
2500 iemRecalcExecModeAndCplFlags(pVCpu); /* Affects iemRegAddToRspEx and the setting of RSP/SP below. */
2501
2502 if (cbPop)
2503 iemRegAddToRspEx(pVCpu, &NewOuterRsp, cbPop);
2504 if (IEM_IS_64BIT_CODE(pVCpu))
2505 pVCpu->cpum.GstCtx.rsp = NewOuterRsp.u;
2506 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2507 pVCpu->cpum.GstCtx.rsp = (uint32_t)NewOuterRsp.u;
2508 else
2509 pVCpu->cpum.GstCtx.sp = (uint16_t)NewOuterRsp.u;
2512
2513 /** @todo check if the hidden bits are loaded correctly for 64-bit
2514 * mode. */
2515 }
2516 /*
2517 * Return to the same privilege level
2518 */
2519 else
2520 {
2521 /* Limit / canonical check. */
2522 uint64_t u64Base;
2523 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
2524
2525 /** @todo Testcase: Is this correct? */
2526 bool f64BitCs = false;
2527 if ( DescCs.Legacy.Gen.u1Long
2528 && IEM_IS_LONG_MODE(pVCpu) )
2529 {
2530 if (!IEM_IS_CANONICAL(uNewRip))
2531 {
2532 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
2533 return iemRaiseNotCanonical(pVCpu);
2534 }
2535 u64Base = 0;
2536 f64BitCs = true;
2538 }
2539 else
2540 {
2541 if (uNewRip > cbLimitCs)
2542 {
2543 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
2544 /** @todo Intel says this is \#GP(0)! */
2545 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
2546 }
2547 u64Base = X86DESC_BASE(&DescCs.Legacy);
2548 }
2549
2550 /*
2551 * Now set the accessed bit before
2552 * writing the return address to the stack and committing the result into
2553 * CS, CSHID and RIP.
2554 */
2555 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
2556 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2557 {
2558 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
2559 if (rcStrict != VINF_SUCCESS)
2560 return rcStrict;
2561 /** @todo check what VT-x and AMD-V does. */
2562 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2563 }
2564
2565 /* commit */
2566 if (cbPop)
2567/** @todo This cannot be right. We're using the old CS mode here, and iemRegAddToRspEx checks fExec. */
2568 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
2569 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig || f64BitCs)
2570 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2571 else
2572 pVCpu->cpum.GstCtx.sp = (uint16_t)NewRsp.u;
2573 if (enmEffOpSize == IEMMODE_16BIT)
2574 pVCpu->cpum.GstCtx.rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
2575 else
2576 pVCpu->cpum.GstCtx.rip = uNewRip;
2577 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2578 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2579 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2580 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
2581 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCs;
2582 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2583 /** @todo check if the hidden bits are loaded correctly for 64-bit
2584 * mode. */
2585
2586 iemRecalcExecModeAndCplFlags(pVCpu);
2587 }
2588
2589 /* Flush the prefetch buffer. */
2590 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo use light flush for same privilege? */
2591
2592 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2593}
2594
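/*
 * Illustrative sketch (not part of IEM): a compact restatement of the stack
 * layout retf consumes above.  The near frame (IP, CS) sits at the top of the
 * stack; for a return to outer privilege the outer SP and SS follow *after*
 * cbPop bytes of callee parameters, which is why iemMemStackPopContinueSpecial
 * is called with cbPop as the offset.  The struct and helper names are
 * assumptions for illustration only.
 */
#if 0 /* Illustrative sketch only; not compiled. */
# include <stdint.h>

typedef struct EXRETFLAYOUT
{
    uint32_t offIp;         /* offset of the return IP/EIP/RIP slot             */
    uint32_t offCs;         /* offset of the CS slot                            */
    uint32_t offOuterSp;    /* offset of the outer SP slot (outer returns only) */
    uint32_t offOuterSs;    /* offset of the outer SS slot (outer returns only) */
} EXRETFLAYOUT;

static EXRETFLAYOUT exampleRetfLayout(uint8_t cbSlot /* 2, 4 or 8 */, uint16_t cbPop)
{
    EXRETFLAYOUT Layout;
    Layout.offIp      = 0;
    Layout.offCs      = cbSlot;
    Layout.offOuterSp = 2u * cbSlot + cbPop;
    Layout.offOuterSs = 2u * cbSlot + cbPop + cbSlot;
    return Layout;
}
#endif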
2595
2596/**
2597 * Implements enter.
2598 *
2599 * We're doing this in C because the instruction is insane; even for the
2600 * u8NestingLevel=0 case, dealing with the stack is tedious.
2601 *
2602 * @param enmEffOpSize The effective operand size.
2603 * @param cbFrame Frame size.
2604 * @param cParameters Frame parameter count.
2605 */
2606IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
2607{
2608 /* Push RBP, saving the old value in TmpRbp. */
2609 RTUINT64U NewRsp; NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2610 RTUINT64U TmpRbp; TmpRbp.u = pVCpu->cpum.GstCtx.rbp;
2611 RTUINT64U NewRbp;
2612 VBOXSTRICTRC rcStrict;
2613 if (enmEffOpSize == IEMMODE_64BIT)
2614 {
2615 rcStrict = iemMemStackPushU64Ex(pVCpu, TmpRbp.u, &NewRsp);
2616 NewRbp = NewRsp;
2617 }
2618 else if (enmEffOpSize == IEMMODE_32BIT)
2619 {
2620 rcStrict = iemMemStackPushU32Ex(pVCpu, TmpRbp.DWords.dw0, &NewRsp);
2621 NewRbp = NewRsp;
2622 }
2623 else
2624 {
2625 rcStrict = iemMemStackPushU16Ex(pVCpu, TmpRbp.Words.w0, &NewRsp);
2626 NewRbp = TmpRbp;
2627 NewRbp.Words.w0 = NewRsp.Words.w0;
2628 }
2629 if (rcStrict != VINF_SUCCESS)
2630 return rcStrict;
2631
2632 /* Copy the parameters (aka nesting levels by Intel). */
2633 cParameters &= 0x1f;
2634 if (cParameters > 0)
2635 {
2636 switch (enmEffOpSize)
2637 {
2638 case IEMMODE_16BIT:
2639 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2640 TmpRbp.DWords.dw0 -= 2;
2641 else
2642 TmpRbp.Words.w0 -= 2;
2643 do
2644 {
2645 uint16_t u16Tmp;
2646 rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Tmp, &TmpRbp);
2647 if (rcStrict != VINF_SUCCESS)
2648 break;
2649 rcStrict = iemMemStackPushU16Ex(pVCpu, u16Tmp, &NewRsp);
2650 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2651 break;
2652
2653 case IEMMODE_32BIT:
2654 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2655 TmpRbp.DWords.dw0 -= 4;
2656 else
2657 TmpRbp.Words.w0 -= 4;
2658 do
2659 {
2660 uint32_t u32Tmp;
2661 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Tmp, &TmpRbp);
2662 if (rcStrict != VINF_SUCCESS)
2663 break;
2664 rcStrict = iemMemStackPushU32Ex(pVCpu, u32Tmp, &NewRsp);
2665 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2666 break;
2667
2668 case IEMMODE_64BIT:
2669 TmpRbp.u -= 8;
2670 do
2671 {
2672 uint64_t u64Tmp;
2673 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Tmp, &TmpRbp);
2674 if (rcStrict != VINF_SUCCESS)
2675 break;
2676 rcStrict = iemMemStackPushU64Ex(pVCpu, u64Tmp, &NewRsp);
2677 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
2678 break;
2679
2680 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2681 }
2682 if (rcStrict != VINF_SUCCESS)
2683 return rcStrict;
2684
2685 /* Push the new RBP */
2686 if (enmEffOpSize == IEMMODE_64BIT)
2687 rcStrict = iemMemStackPushU64Ex(pVCpu, NewRbp.u, &NewRsp);
2688 else if (enmEffOpSize == IEMMODE_32BIT)
2689 rcStrict = iemMemStackPushU32Ex(pVCpu, NewRbp.DWords.dw0, &NewRsp);
2690 else
2691 rcStrict = iemMemStackPushU16Ex(pVCpu, NewRbp.Words.w0, &NewRsp);
2692 if (rcStrict != VINF_SUCCESS)
2693 return rcStrict;
2694
2695 }
2696
2697 /* Recalc RSP. */
2698 iemRegSubFromRspEx(pVCpu, &NewRsp, cbFrame);
2699
2700 /** @todo Should probe write access at the new RSP according to AMD. */
2701 /** @todo Should handle accesses to the VMX APIC-access page. */
2702
2703 /* Commit it. */
2704 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2705 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2706 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2707}
2708
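/*
 * Illustrative sketch (not part of IEM): the net effect of the level-0 ENTER
 * implemented above, restated as a small model over a flat byte array
 * standing in for the stack.  Nesting-level copies are omitted and all names
 * are assumptions for illustration only.
 */
#if 0 /* Illustrative sketch only; not compiled. */
# include <stdint.h>
# include <string.h>

/* pbStack is the stack memory, *piSp / *piBp are byte indices acting as RSP/RBP. */
static void exampleEnterLevel0(uint8_t *pbStack, uint64_t *piSp, uint64_t *piBp, uint16_t cbFrame)
{
    *piSp -= 8;
    memcpy(&pbStack[*piSp], piBp, 8);   /* push the old frame pointer          */
    *piBp  = *piSp;                     /* new frame pointer = RSP after push  */
    *piSp -= cbFrame;                   /* reserve the local frame             */
}
#endif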
2709
2710
2711/**
2712 * Implements leave.
2713 *
2714 * We're doing this in C because messing with the stack registers is annoying
2715 * since they depend on SS attributes.
2716 *
2717 * @param enmEffOpSize The effective operand size.
2718 */
2719IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
2720{
2721 /* Calculate the intermediate RSP from RBP and the stack attributes. */
2722 RTUINT64U NewRsp;
2723 if (IEM_IS_64BIT_CODE(pVCpu))
2724 NewRsp.u = pVCpu->cpum.GstCtx.rbp;
2725 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2726 NewRsp.u = pVCpu->cpum.GstCtx.ebp;
2727 else
2728 {
2729 /** @todo Check that LEAVE actually preserve the high EBP bits. */
2730 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
2731 NewRsp.Words.w0 = pVCpu->cpum.GstCtx.bp;
2732 }
2733
2734 /* Pop RBP according to the operand size. */
2735 VBOXSTRICTRC rcStrict;
2736 RTUINT64U NewRbp;
2737 switch (enmEffOpSize)
2738 {
2739 case IEMMODE_16BIT:
2740 NewRbp.u = pVCpu->cpum.GstCtx.rbp;
2741 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRbp.Words.w0, &NewRsp);
2742 break;
2743 case IEMMODE_32BIT:
2744 NewRbp.u = 0;
2745 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRbp.DWords.dw0, &NewRsp);
2746 break;
2747 case IEMMODE_64BIT:
2748 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRbp.u, &NewRsp);
2749 break;
2750 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2751 }
2752 if (rcStrict != VINF_SUCCESS)
2753 return rcStrict;
2754
2755
2756 /* Commit it. */
2757 pVCpu->cpum.GstCtx.rbp = NewRbp.u;
2758 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
2759 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
2760}
2761
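/*
 * Illustrative sketch (not part of IEM): LEAVE above is the inverse of ENTER -
 * RSP is reloaded from RBP (width picked from SS.D / 64-bit mode) and the
 * saved RBP is then popped with the operand-size width.  A matching 64-bit
 * flat-stack sketch with illustrative names follows.
 */
#if 0 /* Illustrative sketch only; not compiled. */
# include <stdint.h>
# include <string.h>

static void exampleLeave64(uint8_t const *pbStack, uint64_t *piSp, uint64_t *piBp)
{
    *piSp = *piBp;                      /* collapse the frame: RSP = RBP */
    memcpy(piBp, &pbStack[*piSp], 8);   /* pop the saved frame pointer   */
    *piSp += 8;
}
#endif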
2762
2763/**
2764 * Implements int3 and int XX.
2765 *
2766 * @param u8Int The interrupt vector number.
2767 * @param enmInt The int instruction type.
2768 */
2769IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt)
2770{
2771 Assert(pVCpu->iem.s.cXcptRecursions == 0);
2772
2773 /*
2774 * We must check if this INT3 might belong to DBGF before raising a #BP.
2775 */
2776 if (u8Int == 3)
2777 {
2778 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2779 if (pVM->dbgf.ro.cEnabledInt3Breakpoints == 0)
2780 { /* likely: No vbox debugger breakpoints */ }
2781 else
2782 {
2783 VBOXSTRICTRC rcStrict = DBGFTrap03Handler(pVM, pVCpu, &pVCpu->cpum.GstCtx);
2784 Log(("iemCImpl_int: DBGFTrap03Handler -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2785 if (rcStrict != VINF_EM_RAW_GUEST_TRAP)
2786 return iemSetPassUpStatus(pVCpu, rcStrict);
2787 }
2788 }
2789/** @todo single stepping */
2790 return iemRaiseXcptOrInt(pVCpu,
2791 cbInstr,
2792 u8Int,
2793 IEM_XCPT_FLAGS_T_SOFT_INT | enmInt,
2794 0,
2795 0);
2796}
2797
2798
2799/**
2800 * Implements iret for real mode and V8086 mode.
2801 *
2802 * @param enmEffOpSize The effective operand size.
2803 */
2804IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
2805{
2806 X86EFLAGS Efl;
2807 Efl.u = IEMMISC_GET_EFL(pVCpu);
2808 NOREF(cbInstr);
2809
2810 /*
2811 * iret throws an exception if VME isn't enabled.
2812 */
2813 if ( Efl.Bits.u1VM
2814 && Efl.Bits.u2IOPL != 3
2815 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME))
2816 return iemRaiseGeneralProtectionFault0(pVCpu);
2817
2818 /*
2819 * Do the stack bits, but don't commit RSP before everything checks
2820 * out right.
2821 */
2822 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2823 VBOXSTRICTRC rcStrict;
2824 uint8_t bUnmapInfo;
2825 RTCPTRUNION uFrame;
2826 uint16_t uNewCs;
2827 uint32_t uNewEip;
2828 uint32_t uNewFlags;
2829 uint64_t uNewRsp;
2830 if (enmEffOpSize == IEMMODE_32BIT)
2831 {
2832 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
2833 if (rcStrict != VINF_SUCCESS)
2834 return rcStrict;
2835 uNewEip = uFrame.pu32[0];
2836 if (uNewEip > UINT16_MAX)
2837 return iemRaiseGeneralProtectionFault0(pVCpu);
2838
2839 uNewCs = (uint16_t)uFrame.pu32[1];
2840 uNewFlags = uFrame.pu32[2];
2841 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2842 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
2843 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
2844 | X86_EFL_ID;
2845 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
2846 uNewFlags &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
2847 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
2848 }
2849 else
2850 {
2851 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
2852 if (rcStrict != VINF_SUCCESS)
2853 return rcStrict;
2854 uNewEip = uFrame.pu16[0];
2855 uNewCs = uFrame.pu16[1];
2856 uNewFlags = uFrame.pu16[2];
2857 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2858 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
2859 uNewFlags |= Efl.u & ((UINT32_C(0xffff0000) | X86_EFL_1) & ~X86_EFL_RF);
2860 /** @todo The Intel pseudo code does not indicate what happens to
2861 * reserved flags. We just ignore them. */
2862 /* Ancient CPU adjustments: See iemCImpl_popf. */
2863 if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_286)
2864 uNewFlags &= ~(X86_EFL_NT | X86_EFL_IOPL);
2865 }
2866 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo);
2867 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2868 { /* extremely likely */ }
2869 else
2870 return rcStrict;
2871
2872 /** @todo Check how this is supposed to work if sp=0xfffe. */
2873 Log7(("iemCImpl_iret_real_v8086: uNewCs=%#06x uNewRip=%#010x uNewFlags=%#x uNewRsp=%#18llx\n",
2874 uNewCs, uNewEip, uNewFlags, uNewRsp));
2875
2876 /*
2877 * Check the limit of the new EIP.
2878 */
2879 /** @todo Only the AMD pseudo code checks the limit here; what's
2880 * right? */
2881 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
2882 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2883
2884 /*
2885 * V8086 checks and flag adjustments
2886 */
2887 if (Efl.Bits.u1VM)
2888 {
2889 if (Efl.Bits.u2IOPL == 3)
2890 {
2891 /* Preserve IOPL and clear RF. */
2892 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
2893 uNewFlags |= Efl.u & (X86_EFL_IOPL);
2894 }
2895 else if ( enmEffOpSize == IEMMODE_16BIT
2896 && ( !(uNewFlags & X86_EFL_IF)
2897 || !Efl.Bits.u1VIP )
2898 && !(uNewFlags & X86_EFL_TF) )
2899 {
2900 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
2901 uNewFlags &= ~X86_EFL_VIF;
2902 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2903 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2904 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2905 }
2906 else
2907 return iemRaiseGeneralProtectionFault0(pVCpu);
2908 Log7(("iemCImpl_iret_real_v8086: u1VM=1: adjusted uNewFlags=%#x\n", uNewFlags));
2909 }
2910
2911 /*
2912 * Commit the operation.
2913 */
2914#ifdef DBGFTRACE_ENABLED
2915 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
2916 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
2917#endif
2918 pVCpu->cpum.GstCtx.rsp = uNewRsp;
2919 pVCpu->cpum.GstCtx.rip = uNewEip;
2920 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
2921 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
2922 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2923 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)uNewCs << 4;
2924 /** @todo do we load attribs and limit as well? */
2925 Assert(uNewFlags & X86_EFL_1);
2926 IEMMISC_SET_EFL(pVCpu, uNewFlags);
2927
2928 /* Flush the prefetch buffer. */
2929 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo can do light flush in real mode at least */
2930
2931/** @todo single stepping */
2932 return VINF_SUCCESS;
2933}
2934
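/*
 * Illustrative sketch (not part of IEM): the frames popped by the
 * real/V8086-mode IRET above, written out as plain structs for reference -
 * 6 bytes for a 16-bit iret, 12 bytes for a 32-bit one where only the low
 * word of the CS slot is used.  Illustrative types only, not IEM definitions.
 */
#if 0 /* Illustrative sketch only; not compiled. */
# include <stdint.h>

typedef struct EXIRETFRAME16
{
    uint16_t uIp;        /* [sp+0] */
    uint16_t uCs;        /* [sp+2] */
    uint16_t uFlags;     /* [sp+4] */
} EXIRETFRAME16;

typedef struct EXIRETFRAME32
{
    uint32_t uEip;       /* [sp+0]                                 */
    uint32_t uCs;        /* [sp+4] - only the low 16 bits are used */
    uint32_t uEflags;    /* [sp+8]                                 */
} EXIRETFRAME32;
#endif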
2935
2936/**
2937 * Loads a segment register when entering V8086 mode.
2938 *
2939 * @param pSReg The segment register.
2940 * @param uSeg The segment to load.
2941 */
2942static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2943{
2944 pSReg->Sel = uSeg;
2945 pSReg->ValidSel = uSeg;
2946 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2947 pSReg->u64Base = (uint32_t)uSeg << 4;
2948 pSReg->u32Limit = 0xffff;
2949 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2950 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2951 * IRET'ing to V8086. */
2952}
2953
2954
2955/**
2956 * Implements iret for protected mode returning to V8086 mode.
2957 *
2958 * @param uNewEip The new EIP.
2959 * @param uNewCs The new CS.
2960 * @param uNewFlags The new EFLAGS.
2961 * @param uNewRsp The RSP after the initial IRET frame.
2962 *
2963 * @note This can only be a 32-bit iret due to the X86_EFL_VM position.
2964 */
2965IEM_CIMPL_DEF_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp)
2966{
2967 RT_NOREF_PV(cbInstr);
2968 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK);
2969
2970 /*
2971 * Pop the V8086 specific frame bits off the stack.
2972 */
2973 uint8_t bUnmapInfo;
2974 RTCPTRUNION uFrame;
2975 VBOXSTRICTRC rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 24 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp);
2976 if (rcStrict != VINF_SUCCESS)
2977 return rcStrict;
2978 uint32_t uNewEsp = uFrame.pu32[0];
2979 uint16_t uNewSs = uFrame.pu32[1];
2980 uint16_t uNewEs = uFrame.pu32[2];
2981 uint16_t uNewDs = uFrame.pu32[3];
2982 uint16_t uNewFs = uFrame.pu32[4];
2983 uint16_t uNewGs = uFrame.pu32[5];
2984 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */
2985 if (rcStrict != VINF_SUCCESS)
2986 return rcStrict;
2987
2988 /*
2989 * Commit the operation.
2990 */
2991 uNewFlags &= X86_EFL_LIVE_MASK;
2992 uNewFlags |= X86_EFL_RA1_MASK;
2993#ifdef DBGFTRACE_ENABLED
2994 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
2995 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
2996#endif
2997 Log7(("iemCImpl_iret_prot_v8086: %04x:%08x -> %04x:%04x %x %04x:%04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp));
2998
2999 IEMMISC_SET_EFL(pVCpu, uNewFlags);
3000 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.cs, uNewCs);
3001 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ss, uNewSs);
3002 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.es, uNewEs);
3003 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.ds, uNewDs);
3004 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.fs, uNewFs);
3005 iemCImplCommonV8086LoadSeg(&pVCpu->cpum.GstCtx.gs, uNewGs);
3006 pVCpu->cpum.GstCtx.rip = (uint16_t)uNewEip;
3007 pVCpu->cpum.GstCtx.rsp = uNewEsp; /** @todo check this out! */
3008 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
3009 | (3 << IEM_F_X86_CPL_SHIFT)
3010 | IEM_F_MODE_X86_16BIT_PROT_V86;
3011
3012 /* Flush the prefetch buffer. */
3013 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
3014
3015/** @todo single stepping */
3016 return VINF_SUCCESS;
3017}
3018
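/*
 * Illustrative sketch (not part of IEM): when IRET returns to V8086 mode, the
 * EIP/CS/EFLAGS frame consumed by the caller is followed by the six dwords
 * popped above - ESP, SS, ES, DS, FS and GS - after which every segment ends
 * up with base = selector << 4, a 0xFFFF limit and 0xF3 attributes.  The
 * struct below is an assumption for illustration, not an IEM type.
 */
#if 0 /* Illustrative sketch only; not compiled. */
# include <stdint.h>

typedef struct EXIRETV86TAIL
{
    uint32_t uEsp;       /* new ESP                           */
    uint32_t uSs;        /* new SS (low 16 bits significant)  */
    uint32_t uEs;        /* new ES                            */
    uint32_t uDs;        /* new DS                            */
    uint32_t uFs;        /* new FS                            */
    uint32_t uGs;        /* new GS                            */
} EXIRETV86TAIL;         /* 24 bytes, matching the pop above  */
#endif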
3019
3020/**
3021 * Implements iret for protected mode returning via a nested task.
3022 *
3023 * @param enmEffOpSize The effective operand size.
3024 */
3025IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
3026{
3027 Log7(("iemCImpl_iret_prot_NestedTask:\n"));
3028#ifndef IEM_IMPLEMENTS_TASKSWITCH
3029 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
3030#else
3031 RT_NOREF_PV(enmEffOpSize);
3032
3033 /*
3034 * Read the segment selector in the link-field of the current TSS.
3035 */
3036 RTSEL uSelRet;
3037 VBOXSTRICTRC rcStrict = iemMemFetchSysU16(pVCpu, &uSelRet, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base);
3038 if (rcStrict != VINF_SUCCESS)
3039 return rcStrict;
3040
3041 /*
3042 * Fetch the returning task's TSS descriptor from the GDT.
3043 */
3044 if (uSelRet & X86_SEL_LDT)
3045 {
3046 Log(("iret_prot_NestedTask TSS not in LDT. uSelRet=%04x -> #TS\n", uSelRet));
3047 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet);
3048 }
3049
3050 IEMSELDESC TssDesc;
3051 rcStrict = iemMemFetchSelDesc(pVCpu, &TssDesc, uSelRet, X86_XCPT_GP);
3052 if (rcStrict != VINF_SUCCESS)
3053 return rcStrict;
3054
3055 if (TssDesc.Legacy.Gate.u1DescType)
3056 {
3057 Log(("iret_prot_NestedTask Invalid TSS type. uSelRet=%04x -> #TS\n", uSelRet));
3058 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3059 }
3060
3061 if ( TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_286_TSS_BUSY
3062 && TssDesc.Legacy.Gate.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
3063 {
3064 Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
3065 return iemRaiseTaskSwitchFaultBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3066 }
3067
3068 if (!TssDesc.Legacy.Gate.u1Present)
3069 {
3070 Log(("iret_prot_NestedTask TSS is not present. uSelRet=%04x -> #NP\n", uSelRet));
3071 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSelRet & X86_SEL_MASK_OFF_RPL);
3072 }
3073
3074 uint32_t uNextEip = pVCpu->cpum.GstCtx.eip + cbInstr;
3075 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
3076 0 /* uCr2 */, uSelRet, &TssDesc);
3077#endif
3078}
3079
3080
3081/**
3082 * Implements iret for protected mode.
3083 *
3084 * @param enmEffOpSize The effective operand size.
3085 */
3086IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
3087{
3088 NOREF(cbInstr);
3089 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3090
3091 /*
3092 * Nested task return.
3093 */
3094 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3095 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
3096
3097 /*
3098 * Normal return.
3099 *
3100 * Do the stack bits, but don't commit RSP before everything checks
3101 * out right.
3102 */
3103 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
3104 uint8_t bUnmapInfo;
3105 VBOXSTRICTRC rcStrict;
3106 RTCPTRUNION uFrame;
3107 uint16_t uNewCs;
3108 uint32_t uNewEip;
3109 uint32_t uNewFlags;
3110 uint64_t uNewRsp;
3111 if (enmEffOpSize == IEMMODE_32BIT)
3112 {
3113 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, 3, &uFrame.pv, &bUnmapInfo, &uNewRsp);
3114 if (rcStrict != VINF_SUCCESS)
3115 return rcStrict;
3116 uNewEip = uFrame.pu32[0];
3117 uNewCs = (uint16_t)uFrame.pu32[1];
3118 uNewFlags = uFrame.pu32[2];
3119 }
3120 else
3121 {
3122 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
3123 if (rcStrict != VINF_SUCCESS)
3124 return rcStrict;
3125 uNewEip = uFrame.pu16[0];
3126 uNewCs = uFrame.pu16[1];
3127 uNewFlags = uFrame.pu16[2];
3128 }
3129 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */
3130 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3131 { /* extremely likely */ }
3132 else
3133 return rcStrict;
3134 Log7(("iemCImpl_iret_prot: uNewCs=%#06x uNewEip=%#010x uNewFlags=%#x uNewRsp=%#18llx uCpl=%u\n", uNewCs, uNewEip, uNewFlags, uNewRsp, IEM_GET_CPL(pVCpu)));
3135
3136 /*
3137 * We're hopefully not returning to V8086 mode...
3138 */
3139 if ( (uNewFlags & X86_EFL_VM)
3140 && IEM_GET_CPL(pVCpu) == 0)
3141 {
3142 Assert(enmEffOpSize == IEMMODE_32BIT);
3143 return IEM_CIMPL_CALL_4(iemCImpl_iret_prot_v8086, uNewEip, uNewCs, uNewFlags, uNewRsp);
3144 }
3145
3146 /*
3147 * Protected mode.
3148 */
3149 /* Read the CS descriptor. */
3150 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3151 {
3152 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
3153 return iemRaiseGeneralProtectionFault0(pVCpu);
3154 }
3155
3156 IEMSELDESC DescCS;
3157 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3158 if (rcStrict != VINF_SUCCESS)
3159 {
3160 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
3161 return rcStrict;
3162 }
3163
3164 /* Must be a code descriptor. */
3165 if (!DescCS.Legacy.Gen.u1DescType)
3166 {
3167 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3168 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3169 }
3170 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3171 {
3172 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
3173 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3174 }
3175
3176 /* Privilege checks. */
3177 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3178 {
3179 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3180 {
3181 Log(("iret %04x:%08x - RPL != DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3182 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3183 }
3184 }
3185 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3186 {
3187 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
3188 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3189 }
3190 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
3191 {
3192 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, IEM_GET_CPL(pVCpu)));
3193 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3194 }
3195
3196 /* Present? */
3197 if (!DescCS.Legacy.Gen.u1Present)
3198 {
3199 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
3200 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3201 }
3202
3203 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3204
3205 /*
3206 * Return to outer level?
3207 */
3208 if ((uNewCs & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
3209 {
3210 uint16_t uNewSS;
3211 uint32_t uNewESP;
3212 if (enmEffOpSize == IEMMODE_32BIT)
3213 {
3214 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0/*off*/, 8 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp);
3215 if (rcStrict != VINF_SUCCESS)
3216 return rcStrict;
3217/** @todo We might be popping a 32-bit ESP from the IRET frame, but it turns
3218 *        out that whether 16 or 32 bits get loaded into SP depends on the
3219 *        D/B bit of the popped SS descriptor. */
3220 uNewESP = uFrame.pu32[0];
3221 uNewSS = (uint16_t)uFrame.pu32[1];
3222 }
3223 else
3224 {
3225 rcStrict = iemMemStackPopContinueSpecial(pVCpu, 0 /*off*/, 4 /*cbMem*/, &uFrame.pv, &bUnmapInfo, uNewRsp);
3226 if (rcStrict != VINF_SUCCESS)
3227 return rcStrict;
3228 uNewESP = uFrame.pu16[0];
3229 uNewSS = uFrame.pu16[1];
3230 }
3231 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
3232 if (rcStrict != VINF_SUCCESS)
3233 return rcStrict;
3234 Log7(("iemCImpl_iret_prot: uNewSS=%#06x uNewESP=%#010x\n", uNewSS, uNewESP));
3235
3236 /* Read the SS descriptor. */
3237 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3238 {
3239 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
3240 return iemRaiseGeneralProtectionFault0(pVCpu);
3241 }
3242
3243 IEMSELDESC DescSS;
3244 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
3245 if (rcStrict != VINF_SUCCESS)
3246 {
3247 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
3248 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
3249 return rcStrict;
3250 }
3251
3252 /* Privilege checks. */
3253 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3254 {
3255 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
3256 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3257 }
3258 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3259 {
3260 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
3261 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
3262 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3263 }
3264
3265 /* Must be a writeable data segment descriptor. */
3266 if (!DescSS.Legacy.Gen.u1DescType)
3267 {
3268 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
3269 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3270 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3271 }
3272 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3273 {
3274 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
3275 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
3276 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSS);
3277 }
3278
3279 /* Present? */
3280 if (!DescSS.Legacy.Gen.u1Present)
3281 {
3282 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
3283 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSS);
3284 }
3285
3286 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3287
3288 /* Check EIP. */
3289 if (uNewEip > cbLimitCS)
3290 {
3291 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
3292 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
3293 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3294 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3295 }
3296
3297 /*
3298 * Commit the changes, marking CS and SS accessed first since
3299 * that may fail.
3300 */
3301 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3302 {
3303 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3304 if (rcStrict != VINF_SUCCESS)
3305 return rcStrict;
3306 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3307 }
3308 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3309 {
3310 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3311 if (rcStrict != VINF_SUCCESS)
3312 return rcStrict;
3313 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3314 }
3315
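        /* Which EFLAGS bits IRET may restore depends on operand size and CPL:
           the arithmetic flags plus TF, DF, OF and NT are always restorable;
           RF, AC and ID only with a 32-bit operand size; IF, IOPL, VIF and VIP
           only at CPL 0, with IF alone restorable when CPL <= IOPL; 386-class
           and older CPUs lack AC, ID, VIF and VIP altogether. */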
3316 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3317 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3318 if (enmEffOpSize != IEMMODE_16BIT)
3319 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3320 if (IEM_GET_CPL(pVCpu) == 0)
3321 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3322 else if (IEM_GET_CPL(pVCpu) <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3323 fEFlagsMask |= X86_EFL_IF;
3324 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3325 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3326 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3327 fEFlagsNew &= ~fEFlagsMask;
3328 fEFlagsNew |= uNewFlags & fEFlagsMask;
3329#ifdef DBGFTRACE_ENABLED
3330 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
3331 IEM_GET_CPL(pVCpu), uNewCs & X86_SEL_RPL, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3332 uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
3333#endif
3334
3335 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3336 pVCpu->cpum.GstCtx.rip = uNewEip;
3337 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3338 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3339 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3340 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3341 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3342 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3343
3344 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
3345 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
3346 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3347 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3348 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3349 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3350 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3351 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewESP;
3352 else
3353 pVCpu->cpum.GstCtx.rsp = uNewESP;
3354
3355 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.ds);
3356 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.es);
3357 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.fs);
3358 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCs & X86_SEL_RPL, &pVCpu->cpum.GstCtx.gs);
3359
3360 iemRecalcExecModeAndCplFlags(pVCpu);
3361
3362 /* Done! */
3363
3364 }
3365 /*
3366 * Return to the same level.
3367 */
3368 else
3369 {
3370 /* Check EIP. */
3371 if (uNewEip > cbLimitCS)
3372 {
3373 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
3374 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3375 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3376 }
3377
3378 /*
3379 * Commit the changes, marking CS first since it may fail.
3380 */
3381 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3382 {
3383 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3384 if (rcStrict != VINF_SUCCESS)
3385 return rcStrict;
3386 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3387 }
3388
3389 X86EFLAGS NewEfl;
3390 NewEfl.u = IEMMISC_GET_EFL(pVCpu);
3391 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3392 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3393 if (enmEffOpSize != IEMMODE_16BIT)
3394 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3395 if (IEM_GET_CPL(pVCpu) == 0)
3396 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
3397 else if (IEM_GET_CPL(pVCpu) <= NewEfl.Bits.u2IOPL)
3398 fEFlagsMask |= X86_EFL_IF;
3399 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
3400 fEFlagsMask &= ~(X86_EFL_AC | X86_EFL_ID | X86_EFL_VIF | X86_EFL_VIP);
3401 NewEfl.u &= ~fEFlagsMask;
3402 NewEfl.u |= fEFlagsMask & uNewFlags;
3403#ifdef DBGFTRACE_ENABLED
3404 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
3405 IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
3406 uNewCs, uNewEip, uNewFlags, pVCpu->cpum.GstCtx.ss.Sel, uNewRsp);
3407#endif
3408
3409 IEMMISC_SET_EFL(pVCpu, NewEfl.u);
3410 pVCpu->cpum.GstCtx.rip = uNewEip;
3411 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3412 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3413 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3414 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3415 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3416 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3417 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3418 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3419 else
3420 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3421
3422 iemRecalcExecModeAndCplFlags(pVCpu);
3423
3424 /* Done! */
3425 }
3426
3427 /* Flush the prefetch buffer. */
3428 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if same ring? */
3429
3430/** @todo single stepping */
3431 return VINF_SUCCESS;
3432}
3433
3434
3435/**
3436 * Implements iret for long mode
3437 *
3438 * @param enmEffOpSize The effective operand size.
3439 */
3440IEM_CIMPL_DEF_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize)
3441{
3442 NOREF(cbInstr);
3443
3444 /*
3445 * Nested task return is not supported in long mode.
3446 */
3447 if (pVCpu->cpum.GstCtx.eflags.Bits.u1NT)
3448 {
3449 Log(("iretq with NT=1 (eflags=%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.eflags.u));
3450 return iemRaiseGeneralProtectionFault0(pVCpu);
3451 }
3452
3453 /*
3454 * Normal return.
3455 *
3456 * Do the stack bits, but don't commit RSP before everything checks
3457 * out right.
3458 */
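    /* The long mode IRET frame always has five operand-size slots: RIP, CS,
       RFLAGS, RSP and SS, i.e. 40, 20 or 10 bytes for 64-bit, 32-bit and
       16-bit operand sizes respectively. */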
3459 VBOXSTRICTRC rcStrict;
3460 uint8_t bUnmapInfo;
3461 RTCPTRUNION uFrame;
3462 uint64_t uNewRip;
3463 uint16_t uNewCs;
3464 uint16_t uNewSs;
3465 uint32_t uNewFlags;
3466 uint64_t uNewRsp;
3467 if (enmEffOpSize == IEMMODE_64BIT)
3468 {
3469 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*8, 7, &uFrame.pv, &bUnmapInfo, &uNewRsp);
3470 if (rcStrict != VINF_SUCCESS)
3471 return rcStrict;
3472 uNewRip = uFrame.pu64[0];
3473 uNewCs = (uint16_t)uFrame.pu64[1];
3474 uNewFlags = (uint32_t)uFrame.pu64[2];
3475 uNewRsp = uFrame.pu64[3];
3476 uNewSs = (uint16_t)uFrame.pu64[4];
3477 }
3478 else if (enmEffOpSize == IEMMODE_32BIT)
3479 {
3480 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*4, 3, &uFrame.pv, &bUnmapInfo, &uNewRsp);
3481 if (rcStrict != VINF_SUCCESS)
3482 return rcStrict;
3483 uNewRip = uFrame.pu32[0];
3484 uNewCs = (uint16_t)uFrame.pu32[1];
3485 uNewFlags = uFrame.pu32[2];
3486 uNewRsp = uFrame.pu32[3];
3487 uNewSs = (uint16_t)uFrame.pu32[4];
3488 }
3489 else
3490 {
3491 Assert(enmEffOpSize == IEMMODE_16BIT);
3492 rcStrict = iemMemStackPopBeginSpecial(pVCpu, 5*2, 1, &uFrame.pv, &bUnmapInfo, &uNewRsp);
3493 if (rcStrict != VINF_SUCCESS)
3494 return rcStrict;
3495 uNewRip = uFrame.pu16[0];
3496 uNewCs = uFrame.pu16[1];
3497 uNewFlags = uFrame.pu16[2];
3498 uNewRsp = uFrame.pu16[3];
3499 uNewSs = uFrame.pu16[4];
3500 }
3501 rcStrict = iemMemStackPopDoneSpecial(pVCpu, bUnmapInfo); /* don't use iemMemStackPopCommitSpecial here. */
3502 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3503        { /* extremely likely */ }
3504 else
3505 return rcStrict;
3506 Log7(("iretq stack: cs:rip=%04x:%016RX64 rflags=%016RX64 ss:rsp=%04x:%016RX64\n", uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp));
3507
3508 /*
3509 * Check stuff.
3510 */
3511 /* Read the CS descriptor. */
3512 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
3513 {
3514 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3515 return iemRaiseGeneralProtectionFault0(pVCpu);
3516 }
3517
3518 IEMSELDESC DescCS;
3519 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCs, X86_XCPT_GP);
3520 if (rcStrict != VINF_SUCCESS)
3521 {
3522 Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
3523 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3524 return rcStrict;
3525 }
3526
3527 /* Must be a code descriptor. */
3528 if ( !DescCS.Legacy.Gen.u1DescType
3529 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3530 {
3531 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n",
3532 uNewCs, uNewRip, uNewSs, uNewRsp, DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
3533 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3534 }
3535
3536 /* Privilege checks. */
3537 uint8_t const uNewCpl = uNewCs & X86_SEL_RPL;
3538 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF))
3539 {
3540 if ((uNewCs & X86_SEL_RPL) != DescCS.Legacy.Gen.u2Dpl)
3541 {
3542 Log(("iret %04x:%016RX64 - RPL != DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3543 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3544 }
3545 }
3546 else if ((uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
3547 {
3548 Log(("iret %04x:%016RX64 - RPL < DPL (%d) -> #GP\n", uNewCs, uNewRip, DescCS.Legacy.Gen.u2Dpl));
3549 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3550 }
3551 if ((uNewCs & X86_SEL_RPL) < IEM_GET_CPL(pVCpu))
3552 {
3553 Log(("iret %04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, IEM_GET_CPL(pVCpu)));
3554 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
3555 }
3556
3557 /* Present? */
3558 if (!DescCS.Legacy.Gen.u1Present)
3559 {
3560 Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3561 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
3562 }
3563
3564 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3565
3566 /* Read the SS descriptor. */
3567 IEMSELDESC DescSS;
3568 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3569 {
3570 if ( !DescCS.Legacy.Gen.u1Long
3571 || DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
3572 || uNewCpl > 2) /** @todo verify SS=0 impossible for ring-3. */
3573 {
3574 Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3575 return iemRaiseGeneralProtectionFault0(pVCpu);
3576 }
3577 /* Make sure SS is sensible, marked as accessed etc. */
3578 iemMemFakeStackSelDesc(&DescSS, (uNewSs & X86_SEL_RPL));
3579 }
3580 else
3581 {
3582 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
3583 if (rcStrict != VINF_SUCCESS)
3584 {
3585 Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
3586 uNewCs, uNewRip, uNewSs, uNewRsp, VBOXSTRICTRC_VAL(rcStrict)));
3587 return rcStrict;
3588 }
3589 }
3590
3591 /* Privilege checks. */
3592 if ((uNewSs & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
3593 {
3594 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3595 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3596 }
3597
3598 uint32_t cbLimitSs;
3599 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3600 cbLimitSs = UINT32_MAX;
3601 else
3602 {
3603 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
3604 {
3605 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
3606 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u2Dpl));
3607 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3608 }
3609
3610 /* Must be a writeable data segment descriptor. */
3611 if (!DescSS.Legacy.Gen.u1DescType)
3612 {
3613 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
3614 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3615 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3616 }
3617 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
3618 {
3619 Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
3620 uNewCs, uNewRip, uNewSs, uNewRsp, DescSS.Legacy.Gen.u4Type));
3621 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewSs);
3622 }
3623
3624 /* Present? */
3625 if (!DescSS.Legacy.Gen.u1Present)
3626 {
3627 Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3628 return iemRaiseStackSelectorNotPresentBySelector(pVCpu, uNewSs);
3629 }
3630 cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
3631 }
3632
3633 /* Check EIP. */
3634 if (DescCS.Legacy.Gen.u1Long)
3635 {
3636 if (!IEM_IS_CANONICAL(uNewRip))
3637 {
3638 Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
3639 uNewCs, uNewRip, uNewSs, uNewRsp));
3640 return iemRaiseNotCanonical(pVCpu);
3641 }
3642/** @todo check the location of this... Testcase. */
3643 if (RT_LIKELY(!DescCS.Legacy.Gen.u1DefBig))
3644 { /* likely */ }
3645 else
3646 {
3647 Log(("iret %04x:%016RX64/%04x:%016RX64 -> both L and D are set -> #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
3648 return iemRaiseGeneralProtectionFault0(pVCpu);
3649 }
3650 }
3651 else
3652 {
3653 if (uNewRip > cbLimitCS)
3654 {
3655 Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
3656 uNewCs, uNewRip, uNewSs, uNewRsp, cbLimitCS));
3657 /** @todo Which is it, \#GP(0) or \#GP(sel)? */
3658 return iemRaiseSelectorBoundsBySelector(pVCpu, uNewCs);
3659 }
3660 }
3661
3662 /*
3663 * Commit the changes, marking CS and SS accessed first since
3664 * that may fail.
3665 */
3666 /** @todo where exactly are these actually marked accessed by a real CPU? */
3667 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3668 {
3669 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
3670 if (rcStrict != VINF_SUCCESS)
3671 return rcStrict;
3672 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3673 }
3674 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3675 {
3676 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSs);
3677 if (rcStrict != VINF_SUCCESS)
3678 return rcStrict;
3679 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3680 }
3681
3682 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
3683 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
3684 if (enmEffOpSize != IEMMODE_16BIT)
3685 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
3686 if (IEM_GET_CPL(pVCpu) == 0)
3687 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
3688 else if (IEM_GET_CPL(pVCpu) <= pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL)
3689 fEFlagsMask |= X86_EFL_IF;
3690 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pVCpu);
3691 fEFlagsNew &= ~fEFlagsMask;
3692 fEFlagsNew |= uNewFlags & fEFlagsMask;
3693#ifdef DBGFTRACE_ENABLED
3694 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
3695 IEM_GET_CPL(pVCpu), uNewCpl, pVCpu->cpum.GstCtx.rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
3696#endif
3697
3698 IEMMISC_SET_EFL(pVCpu, fEFlagsNew);
3699 pVCpu->cpum.GstCtx.rip = uNewRip;
3700 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
3701 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
3702 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3703 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3704 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3705 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3706 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
3707 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3708 else
3709 pVCpu->cpum.GstCtx.sp = (uint16_t)uNewRsp;
3710 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
3711 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
3712 if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
3713 {
3714 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3715 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_UNUSABLE | (uNewCpl << X86DESCATTR_DPL_SHIFT);
3716 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3717 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3718 Log2(("iretq new SS: NULL\n"));
3719 }
3720 else
3721 {
3722 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3723 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3724 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSs;
3725 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3726 Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
3727 }
3728
3729 if (IEM_GET_CPL(pVCpu) != uNewCpl)
3730 {
3731 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.ds);
3732 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.es);
3733 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.fs);
3734 iemHlpAdjustSelectorForNewCpl(pVCpu, uNewCpl, &pVCpu->cpum.GstCtx.gs);
3735 }
3736
3737 iemRecalcExecModeAndCplFlags(pVCpu);
3738
3739 /* Flush the prefetch buffer. */
3740 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo may light flush if the ring + mode doesn't change */
3741
3742/** @todo single stepping */
3743 return VINF_SUCCESS;
3744}
3745
3746
3747/**
3748 * Implements iret.
3749 *
3750 * @param enmEffOpSize The effective operand size.
3751 */
3752IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
3753{
3754 bool fBlockingNmi = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
3755
3756 if (!IEM_IS_IN_GUEST(pVCpu))
3757 { /* probable */ }
3758#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3759 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3760 {
3761 /*
3762 * Record whether NMI (or virtual-NMI) blocking is in effect during the execution
3763 * of this IRET instruction. We need to provide this information as part of some
3764 * VM-exits.
3765 *
3766 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3767 */
3768 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_VIRT_NMI))
3769 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking;
3770 else
3771 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = fBlockingNmi;
3772
3773 /*
3774 * If "NMI exiting" is set, IRET does not affect blocking of NMIs.
3775 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3776 */
3777 if (IEM_VMX_IS_PINCTLS_SET(pVCpu, VMX_PIN_CTLS_NMI_EXIT))
3778 fBlockingNmi = false;
3779
3780 /* Clear virtual-NMI blocking, if any, before causing any further exceptions. */
3781 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
3782 }
3783#endif
3784 /*
3785     * The SVM nested-guest intercept for IRET takes priority over all exceptions.
3786 * The NMI is still held pending (which I assume means blocking of further NMIs
3787 * is in effect).
3788 *
3789 * See AMD spec. 15.9 "Instruction Intercepts".
3790 * See AMD spec. 15.21.9 "NMI Support".
3791 */
3792 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IRET))
3793 {
3794 Log(("iret: Guest intercept -> #VMEXIT\n"));
3795 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
3796 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3797 }
3798
3799 /*
3800 * Clear NMI blocking, if any, before causing any further exceptions.
3801 * See Intel spec. 6.7.1 "Handling Multiple NMIs".
3802 */
3803 if (fBlockingNmi)
3804 CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3805
3806 /*
3807 * Call a mode specific worker.
3808 */
3809 VBOXSTRICTRC rcStrict;
3810 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
3811 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
3812 else
3813 {
3814 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3815 if (IEM_IS_64BIT_CODE(pVCpu))
3816 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_64bit, enmEffOpSize);
3817 else
3818 rcStrict = IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
3819 }
3820
3821#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3822 /*
3823 * Clear NMI unblocking IRET state with the completion of IRET.
3824 */
3825 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3826 pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret = false;
3827#endif
3828 return rcStrict;
3829}
3830
3831
3832static void iemLoadallSetSelector(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
3833{
3834 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
3835
3836 pHid->Sel = uSel;
3837 pHid->ValidSel = uSel;
3838 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
3839}
3840
3841
3842static void iemLoadall286SetDescCache(PVMCPUCC pVCpu, uint8_t iSegReg, uint8_t const *pbMem)
3843{
3844 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
3845
3846 /* The base is in the first three bytes. */
3847 pHid->u64Base = pbMem[0] + (pbMem[1] << 8) + (pbMem[2] << 16);
3848 /* The attributes are in the fourth byte. */
3849 pHid->Attr.u = pbMem[3];
3850 pHid->Attr.u &= ~(X86DESCATTR_L | X86DESCATTR_D); /* (just to be on the safe side) */
3851 /* The limit is in the last two bytes. */
3852 pHid->u32Limit = pbMem[4] + (pbMem[5] << 8);
3853}
3854
3855
3856/**
3857 * Implements 286 LOADALL (286 CPUs only).
3858 */
3859IEM_CIMPL_DEF_0(iemCImpl_loadall286)
3860{
3861 NOREF(cbInstr);
3862
3863 /* Data is loaded from a buffer at 800h. No checks are done on the
3864 * validity of loaded state.
3865 *
3866 * LOADALL only loads the internal CPU state, it does not access any
3867 * GDT, LDT, or similar tables.
3868 */
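    /* Rough layout of the 0x66 byte buffer as consumed below (offsets
       relative to 800h): 0x06 MSW, 0x16 TR, 0x18 FLAGS, 0x1A IP, 0x1C LDTR,
       0x1E DS/SS/CS/ES selectors, 0x26 DI/SI/BP/SP/BX/DX/CX/AX,
       0x36 ES/CS/SS/DS descriptor caches (6 bytes each), 0x4E GDTR,
       0x5A IDTR. */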
3869
3870 if (IEM_GET_CPL(pVCpu) != 0)
3871 {
3872 Log(("loadall286: CPL must be 0 not %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
3873 return iemRaiseGeneralProtectionFault0(pVCpu);
3874 }
3875
3876 uint8_t bUnmapInfo;
3877 uint8_t const *pbMem = NULL;
3878 RTGCPHYS GCPtrStart = 0x800; /* Fixed table location. */
3879 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pbMem, &bUnmapInfo, 0x66, UINT8_MAX, GCPtrStart, IEM_ACCESS_SYS_R, 0);
3880 if (rcStrict != VINF_SUCCESS)
3881 return rcStrict;
3882
3883 /* The MSW is at offset 0x06. */
3884 uint16_t const *pau16Mem = (uint16_t const *)(pbMem + 0x06);
3885 /* Even LOADALL can't clear the MSW.PE bit, though it can set it. */
3886 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3887 uNewCr0 |= *pau16Mem & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3888 uint64_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
3889
3890 CPUMSetGuestCR0(pVCpu, uNewCr0);
3891 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCr0);
3892
3893 /* Inform PGM if mode changed. */
3894 if ((uNewCr0 & X86_CR0_PE) != (uOldCr0 & X86_CR0_PE))
3895 {
3896 int rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
3897 AssertRCReturn(rc, rc);
3898 /* ignore informational status codes */
3899 }
3900 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
3901 false /* fForce */);
3902
3903 /* TR selector is at offset 0x16. */
3904 pau16Mem = (uint16_t const *)(pbMem + 0x16);
3905 pVCpu->cpum.GstCtx.tr.Sel = pau16Mem[0];
3906 pVCpu->cpum.GstCtx.tr.ValidSel = pau16Mem[0];
3907 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
3908
3909 /* Followed by FLAGS... */
3910 pVCpu->cpum.GstCtx.eflags.u = pau16Mem[1] | X86_EFL_1;
3911 pVCpu->cpum.GstCtx.ip = pau16Mem[2]; /* ...and IP. */
3912
3913 /* LDT is at offset 0x1C. */
3914 pau16Mem = (uint16_t const *)(pbMem + 0x1C);
3915 pVCpu->cpum.GstCtx.ldtr.Sel = pau16Mem[0];
3916 pVCpu->cpum.GstCtx.ldtr.ValidSel = pau16Mem[0];
3917 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3918
3919 /* Segment registers are at offset 0x1E. */
3920 pau16Mem = (uint16_t const *)(pbMem + 0x1E);
3921 iemLoadallSetSelector(pVCpu, X86_SREG_DS, pau16Mem[0]);
3922 iemLoadallSetSelector(pVCpu, X86_SREG_SS, pau16Mem[1]);
3923 iemLoadallSetSelector(pVCpu, X86_SREG_CS, pau16Mem[2]);
3924 iemLoadallSetSelector(pVCpu, X86_SREG_ES, pau16Mem[3]);
3925
3926 /* GPRs are at offset 0x26. */
3927 pau16Mem = (uint16_t const *)(pbMem + 0x26);
3928 pVCpu->cpum.GstCtx.di = pau16Mem[0];
3929 pVCpu->cpum.GstCtx.si = pau16Mem[1];
3930 pVCpu->cpum.GstCtx.bp = pau16Mem[2];
3931 pVCpu->cpum.GstCtx.sp = pau16Mem[3];
3932 pVCpu->cpum.GstCtx.bx = pau16Mem[4];
3933 pVCpu->cpum.GstCtx.dx = pau16Mem[5];
3934 pVCpu->cpum.GstCtx.cx = pau16Mem[6];
3935 pVCpu->cpum.GstCtx.ax = pau16Mem[7];
3936
3937 /* Descriptor caches are at offset 0x36, 6 bytes per entry. */
3938 iemLoadall286SetDescCache(pVCpu, X86_SREG_ES, pbMem + 0x36);
3939 iemLoadall286SetDescCache(pVCpu, X86_SREG_CS, pbMem + 0x3C);
3940 iemLoadall286SetDescCache(pVCpu, X86_SREG_SS, pbMem + 0x42);
3941 iemLoadall286SetDescCache(pVCpu, X86_SREG_DS, pbMem + 0x48);
3942
3943 /* GDTR contents are at offset 0x4E, 6 bytes. */
3944 uint8_t const *pau8Mem = pbMem + 0x4E;
3945 /* NB: Fourth byte "should be zero"; we are ignoring it. */
3946 RTGCPHYS GCPtrBase = pau8Mem[0] + ((uint32_t)pau8Mem[1] << 8) + ((uint32_t)pau8Mem[2] << 16);
3947 uint16_t cbLimit = pau8Mem[4] + ((uint32_t)pau8Mem[5] << 8);
3948 CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
3949
3950 /* IDTR contents are at offset 0x5A, 6 bytes. */
3951 pau8Mem = pbMem + 0x5A;
3952 GCPtrBase = pau8Mem[0] + ((uint32_t)pau8Mem[1] << 8) + ((uint32_t)pau8Mem[2] << 16);
3953 cbLimit = pau8Mem[4] + ((uint32_t)pau8Mem[5] << 8);
3954 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
3955
3956 Log(("LOADALL: GDTR:%08RX64/%04X, IDTR:%08RX64/%04X\n", pVCpu->cpum.GstCtx.gdtr.pGdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.idtr.pIdt, pVCpu->cpum.GstCtx.idtr.cbIdt));
3957 Log(("LOADALL: CS:%04X, CS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.cs.u64Base, pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.Attr.u));
3958 Log(("LOADALL: DS:%04X, DS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.ds.u64Base, pVCpu->cpum.GstCtx.ds.u32Limit, pVCpu->cpum.GstCtx.ds.Attr.u));
3959 Log(("LOADALL: ES:%04X, ES base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.es.Sel, pVCpu->cpum.GstCtx.es.u64Base, pVCpu->cpum.GstCtx.es.u32Limit, pVCpu->cpum.GstCtx.es.Attr.u));
3960 Log(("LOADALL: SS:%04X, SS base:%08X, limit:%04X, attrs:%02X\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ss.u64Base, pVCpu->cpum.GstCtx.ss.u32Limit, pVCpu->cpum.GstCtx.ss.Attr.u));
3961    Log(("LOADALL: SI:%04X, DI:%04X, AX:%04X, BX:%04X, CX:%04X, DX:%04X\n", pVCpu->cpum.GstCtx.si, pVCpu->cpum.GstCtx.di, pVCpu->cpum.GstCtx.ax, pVCpu->cpum.GstCtx.bx, pVCpu->cpum.GstCtx.cx, pVCpu->cpum.GstCtx.dx));
3962
3963 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
3964 if (rcStrict != VINF_SUCCESS)
3965 return rcStrict;
3966
3967 /*
3968     * The CPL may change and protected mode may become enabled.  The CPL is taken
3969     * from the "DPL fields of the SS and CS descriptor caches", but there is no
3970     * word as to what happens if those are not identical (probably bad things).
3971 */
3972 iemRecalcExecModeAndCplFlags(pVCpu);
3973 Assert(IEM_IS_16BIT_CODE(pVCpu));
3974
3975 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS | CPUM_CHANGED_IDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_TR | CPUM_CHANGED_LDTR);
3976
3977 /* Flush the prefetch buffer. */
3978 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
3979
3980/** @todo single stepping */
3981 return rcStrict;
3982}
3983
3984
3985/**
3986 * Implements SYSCALL (AMD and Intel64).
3987 */
3988IEM_CIMPL_DEF_0(iemCImpl_syscall)
3989{
3990 /** @todo hack, LOADALL should be decoded as such on a 286. */
3991 if (RT_UNLIKELY(pVCpu->iem.s.uTargetCpu == IEMTARGETCPU_286))
3992 return iemCImpl_loadall286(pVCpu, cbInstr);
3993
3994 /*
3995 * Check preconditions.
3996 *
3997 * Note that CPUs described in the documentation may load a few odd values
3998     * into CS and SS other than what we allow here.  This has yet to be checked on real
3999 * hardware.
4000 */
4001 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4002 {
4003 Log(("syscall: Not enabled in EFER -> #UD\n"));
4004 return iemRaiseUndefinedOpcode(pVCpu);
4005 }
4006 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4007 {
4008 Log(("syscall: Protected mode is required -> #GP(0)\n"));
4009 return iemRaiseGeneralProtectionFault0(pVCpu);
4010 }
4011 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4012 {
4013 Log(("syscall: Only available in long mode on intel -> #UD\n"));
4014 return iemRaiseUndefinedOpcode(pVCpu);
4015 }
4016
4017 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4018
4019 /** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
4020 /** @todo what about LDT selectors? Shouldn't matter, really. */
4021 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSCALL_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4022 uint16_t uNewSs = uNewCs + 8;
4023 if (uNewCs == 0 || uNewSs == 0)
4024 {
4025 /** @todo Neither Intel nor AMD document this check. */
4026 Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4027 return iemRaiseGeneralProtectionFault0(pVCpu);
4028 }
4029
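    /* In long mode SYSCALL saves RIP in RCX and RFLAGS in R11, loads RIP
       from LSTAR (CSTAR for compatibility-mode callers) and masks RFLAGS
       with SFMASK; in legacy mode it saves EIP in ECX and takes the new EIP
       from the low 32 bits of STAR.  In both cases CS/SS are loaded as flat
       ring-0 segments derived from STAR[47:32]. */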
4030    /* Long mode and legacy mode differ. */
4031 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4032 {
4033        uint64_t uNewRip = IEM_IS_64BIT_CODE(pVCpu) ? pVCpu->cpum.GstCtx.msrLSTAR : pVCpu->cpum.GstCtx.msrCSTAR;
4034
4035 /* This test isn't in the docs, but I'm not trusting the guys writing
4036 the MSRs to have validated the values as canonical like they should. */
4037 if (!IEM_IS_CANONICAL(uNewRip))
4038 {
4039 /** @todo Intel claims this can't happen because IA32_LSTAR MSR can't be written with non-canonical address. */
4040 Log(("syscall: New RIP not canonical -> #UD\n"));
4041 return iemRaiseUndefinedOpcode(pVCpu);
4042 }
4043
4044 /*
4045 * Commit it.
4046 */
4047 Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, uNewRip));
4048 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.rip + cbInstr;
4049 pVCpu->cpum.GstCtx.rip = uNewRip;
4050
4051 pVCpu->cpum.GstCtx.rflags.u &= ~X86_EFL_RF;
4052 pVCpu->cpum.GstCtx.r11 = pVCpu->cpum.GstCtx.rflags.u;
4053 pVCpu->cpum.GstCtx.rflags.u &= ~pVCpu->cpum.GstCtx.msrSFMASK;
4054 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_RA1_MASK;
4055
4056 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4057 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4058
4059 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4060 | IEM_F_MODE_X86_64BIT;
4061 }
4062 else
4063 {
4064 /*
4065 * Commit it.
4066 */
4067 Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, (uint32_t)(pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
4068 pVCpu->cpum.GstCtx.rcx = pVCpu->cpum.GstCtx.eip + cbInstr;
4069 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK;
4070 pVCpu->cpum.GstCtx.rflags.u &= ~(X86_EFL_VM | X86_EFL_IF | X86_EFL_RF);
4071
4072 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
4073 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
4074
4075 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4076 | IEM_F_MODE_X86_32BIT_PROT
4077 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4078 }
4079 pVCpu->cpum.GstCtx.cs.Sel = uNewCs;
4080 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs;
4081 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4082 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4083 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4084
4085 pVCpu->cpum.GstCtx.ss.Sel = uNewSs;
4086 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs;
4087 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4088 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4089 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4090
4091 /* Flush the prefetch buffer. */
4092 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4093
4094/** @todo single step */
4095 return VINF_SUCCESS;
4096}
4097
4098
4099/**
4100 * Implements SYSRET (AMD and Intel64).
4101 *
4102 * @param enmEffOpSize The effective operand size.
4103 */
4104IEM_CIMPL_DEF_1(iemCImpl_sysret, IEMMODE, enmEffOpSize)
4105
4106{
4107 RT_NOREF_PV(cbInstr);
4108
4109 /*
4110 * Check preconditions.
4111 *
4112 * Note that CPUs described in the documentation may load a few odd values
4113     * into CS and SS other than what we allow here.  This has yet to be checked on real
4114 * hardware.
4115 */
4116 if (!(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SCE))
4117 {
4118 Log(("sysret: Not enabled in EFER -> #UD\n"));
4119 return iemRaiseUndefinedOpcode(pVCpu);
4120 }
4121 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4122 {
4123 Log(("sysret: Only available in long mode on intel -> #UD\n"));
4124 return iemRaiseUndefinedOpcode(pVCpu);
4125 }
4126 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4127 {
4128 Log(("sysret: Protected mode is required -> #GP(0)\n"));
4129 return iemRaiseGeneralProtectionFault0(pVCpu);
4130 }
4131 if (IEM_GET_CPL(pVCpu) != 0)
4132 {
4133 Log(("sysret: CPL must be 0 not %u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
4134 return iemRaiseGeneralProtectionFault0(pVCpu);
4135 }
4136
4137 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS);
4138
4139 /** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
4140 uint16_t uNewCs = (pVCpu->cpum.GstCtx.msrSTAR >> MSR_K6_STAR_SYSRET_CS_SS_SHIFT) & X86_SEL_MASK_OFF_RPL;
4141 uint16_t uNewSs = uNewCs + 8;
4142 if (enmEffOpSize == IEMMODE_64BIT)
4143 uNewCs += 16;
4144 if (uNewCs == 0 || uNewSs == 0)
4145 {
4146 Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
4147 return iemRaiseGeneralProtectionFault0(pVCpu);
4148 }
4149
4150 /*
4151 * Commit it.
4152 */
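    /* SYSRET always returns to CPL 3: CS comes from STAR[63:48] (plus 16
       for a 64-bit return), SS is CS + 8, RIP is taken from RCX (ECX for a
       32-bit return) and, in long mode, RFLAGS is restored from R11. */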
4153 bool f32Bit = true;
4154 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4155 {
4156 if (enmEffOpSize == IEMMODE_64BIT)
4157 {
4158 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.r11));
4159            /* Note! We disregard the intel manual regarding the RCX canonical
4160               check; ask intel+xen why AMD doesn't do it. */
4161 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4162 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4163 | (3 << X86DESCATTR_DPL_SHIFT);
4164 f32Bit = false;
4165 }
4166 else
4167 {
4168 Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.r11));
4169 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.ecx;
4170 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4171 | (3 << X86DESCATTR_DPL_SHIFT);
4172 }
4173 /** @todo testcase: See what kind of flags we can make SYSRET restore and
4174 * what it really ignores. RF and VM are hinted at being zero, by AMD.
4175 * Intel says: RFLAGS := (R11 & 3C7FD7H) | 2; */
4176 pVCpu->cpum.GstCtx.rflags.u = pVCpu->cpum.GstCtx.r11 & (X86_EFL_POPF_BITS | X86_EFL_VIF | X86_EFL_VIP);
4177 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_RA1_MASK;
4178 }
4179 else
4180 {
4181 Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, uNewCs, pVCpu->cpum.GstCtx.ecx));
4182 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rcx;
4183 pVCpu->cpum.GstCtx.rflags.u |= X86_EFL_IF;
4184 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
4185 | (3 << X86DESCATTR_DPL_SHIFT);
4186 }
4187 pVCpu->cpum.GstCtx.cs.Sel = uNewCs | 3;
4188 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs | 3;
4189 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4190 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4191 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4192
4193    /* The SS hidden bits remain unchanged says AMD; we presume they set DPL to 3.
4194       Intel (and presumably VIA) OTOH seems to load valid ring-3 values, see
4195 X86_BUG_SYSRET_SS_ATTRS in linux 5.3. */
4196 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
4197 {
4198 Log(("sysret: ss:rsp=%04x:%08RX64 attr=%x -> %04x:%08RX64 attr=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.ss.Attr.u, uNewSs | 3, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.ss.Attr.u | (3 << X86DESCATTR_DPL_SHIFT) ));
4199 pVCpu->cpum.GstCtx.ss.Attr.u |= (3 << X86DESCATTR_DPL_SHIFT);
4200 }
4201 else
4202 {
4203 Log(("sysret: ss:rsp=%04x:%08RX64 attr=%x -> %04x:%08RX64 attr=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.ss.Attr.u, uNewSs | 3, pVCpu->cpum.GstCtx.rsp, X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT) ));
4204 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC
4205 | (3 << X86DESCATTR_DPL_SHIFT);
4206 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4207 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4208 }
4209 pVCpu->cpum.GstCtx.ss.Sel = uNewSs | 3;
4210 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSs | 3;
4211 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4212 /** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
4213 * on sysret on AMD and not on intel. */
4214
4215 if (!f32Bit)
4216 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4217 | (3 << IEM_F_X86_CPL_SHIFT)
4218 | IEM_F_MODE_X86_64BIT;
4219 else
4220 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4221 | (3 << IEM_F_X86_CPL_SHIFT)
4222 | IEM_F_MODE_X86_32BIT_PROT
4223 /** @todo sort out the SS.BASE/LIM/ATTR claim by AMD and maybe we can switch to
4224 * iemCalc32BitFlatIndicatorDsEs and move this up into the above branch. */
4225 | iemCalc32BitFlatIndicator(pVCpu);
4226
4227 /* Flush the prefetch buffer. */
4228 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4229
4230/** @todo single step */
4231 return VINF_SUCCESS;
4232}
4233
4234
4235/**
4236 * Implements SYSENTER (Intel, 32-bit AMD).
4237 */
4238IEM_CIMPL_DEF_0(iemCImpl_sysenter)
4239{
4240 RT_NOREF(cbInstr);
4241
4242 /*
4243 * Check preconditions.
4244 *
4245 * Note that CPUs described in the documentation may load a few odd values
4246     * into CS and SS other than what we allow here.  This has yet to be checked on real
4247 * hardware.
4248 */
4249 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4250 {
4251        Log(("sysenter: not supported -> #UD\n"));
4252 return iemRaiseUndefinedOpcode(pVCpu);
4253 }
4254 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4255 {
4256 Log(("sysenter: Protected or long mode is required -> #GP(0)\n"));
4257 return iemRaiseGeneralProtectionFault0(pVCpu);
4258 }
4259 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4260 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4261 {
4262 Log(("sysenter: Only available in protected mode on AMD -> #UD\n"));
4263 return iemRaiseUndefinedOpcode(pVCpu);
4264 }
4265 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4266 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4267 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4268 {
4269 Log(("sysenter: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4270 return iemRaiseGeneralProtectionFault0(pVCpu);
4271 }
4272
4273 /* This test isn't in the docs, it's just a safeguard against missing
4274 canonical checks when writing the registers. */
4275 if (RT_LIKELY( !fIsLongMode
4276 || ( IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.eip)
4277 && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.SysEnter.esp))))
4278 { /* likely */ }
4279 else
4280 {
4281 Log(("sysenter: SYSENTER_EIP = %#RX64 or/and SYSENTER_ESP = %#RX64 not canonical -> #GP(0)\n",
4282 pVCpu->cpum.GstCtx.SysEnter.eip, pVCpu->cpum.GstCtx.SysEnter.esp));
4283 return iemRaiseUndefinedOpcode(pVCpu);
4284 }
4285
4286/** @todo Test: Sysenter from ring-0, ring-1 and ring-2. */
4287
4288 /*
4289 * Update registers and commit.
4290 */
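    /* SYSENTER targets CPL 0: CS is SYSENTER_CS with the RPL bits cleared,
       SS is CS + 8, RIP/RSP come from SYSENTER_EIP/SYSENTER_ESP, and IF, VM
       and RF are cleared.  No return address is saved by the instruction. */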
4291 if (fIsLongMode)
4292 {
4293 Log(("sysenter: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4294 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, pVCpu->cpum.GstCtx.SysEnter.eip));
4295 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.SysEnter.eip;
4296 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.SysEnter.esp;
4297 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4298 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4299 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4300 | IEM_F_MODE_X86_64BIT;
4301 }
4302 else
4303 {
4304 Log(("sysenter: %04x:%08RX32 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, (uint32_t)pVCpu->cpum.GstCtx.rip,
4305 pVCpu->cpum.GstCtx.rflags.u, uNewCs & X86_SEL_MASK_OFF_RPL, (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip));
4306 pVCpu->cpum.GstCtx.rip = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.eip;
4307 pVCpu->cpum.GstCtx.rsp = (uint32_t)pVCpu->cpum.GstCtx.SysEnter.esp;
4308 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4309 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC;
4310 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4311 | IEM_F_MODE_X86_32BIT_PROT
4312 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4313 }
4314 pVCpu->cpum.GstCtx.cs.Sel = uNewCs & X86_SEL_MASK_OFF_RPL;
4315 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCs & X86_SEL_MASK_OFF_RPL;
4316 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4317 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4318 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4319
4320 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4321 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs & X86_SEL_MASK_OFF_RPL) + 8;
4322 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4323 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4324 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4325 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC;
4326 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4327
4328 pVCpu->cpum.GstCtx.rflags.Bits.u1IF = 0;
4329 pVCpu->cpum.GstCtx.rflags.Bits.u1VM = 0;
4330 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4331
4332 /* Flush the prefetch buffer. */
4333 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4334
4335/** @todo single stepping */
4336 return VINF_SUCCESS;
4337}
4338
4339
4340/**
4341 * Implements SYSEXIT (Intel, 32-bit AMD).
4342 *
4343 * @param enmEffOpSize The effective operand size.
4344 */
4345IEM_CIMPL_DEF_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize)
4346{
4347 RT_NOREF(cbInstr);
4348
4349 /*
4350 * Check preconditions.
4351 *
4352 * Note that CPUs described in the documentation may load a few odd values
4353     * into CS and SS other than what we allow here.  This has yet to be checked on real
4354 * hardware.
4355 */
4356 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSysEnter)
4357 {
4358        Log(("sysexit: not supported -> #UD\n"));
4359 return iemRaiseUndefinedOpcode(pVCpu);
4360 }
4361 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
4362 {
4363 Log(("sysexit: Protected or long mode is required -> #GP(0)\n"));
4364 return iemRaiseGeneralProtectionFault0(pVCpu);
4365 }
4366 bool fIsLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
4367 if (IEM_IS_GUEST_CPU_AMD(pVCpu) && fIsLongMode)
4368 {
4369 Log(("sysexit: Only available in protected mode on AMD -> #UD\n"));
4370 return iemRaiseUndefinedOpcode(pVCpu);
4371 }
4372 if (IEM_GET_CPL(pVCpu) != 0)
4373 {
4374 Log(("sysexit: CPL(=%u) != 0 -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
4375 return iemRaiseGeneralProtectionFault0(pVCpu);
4376 }
4377 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
4378 uint16_t uNewCs = pVCpu->cpum.GstCtx.SysEnter.cs;
4379 if ((uNewCs & X86_SEL_MASK_OFF_RPL) == 0)
4380 {
4381 Log(("sysexit: SYSENTER_CS = %#x -> #GP(0)\n", uNewCs));
4382 return iemRaiseGeneralProtectionFault0(pVCpu);
4383 }
4384
4385 /*
4386 * Update registers and commit.
4387 */
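    /* SYSEXIT returns to CPL 3: CS is SYSENTER_CS + 16 (+32 for a 64-bit
       exit) with RPL 3, SS is CS + 8, and RIP/RSP are taken from RDX/RCX
       (EDX/ECX for a 32-bit exit). */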
4388 if (enmEffOpSize == IEMMODE_64BIT)
4389 {
4390 Log(("sysexit: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4391             pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 32, pVCpu->cpum.GstCtx.rdx));
4392 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rdx;
4393 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.rcx;
4394 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_L | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4395 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4396 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 32;
4397 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 32;
4398 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 40;
4399 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 40;
4400
4401 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4402 | (3 << IEM_F_X86_CPL_SHIFT)
4403 | IEM_F_MODE_X86_64BIT;
4404 }
4405 else
4406 {
4407 Log(("sysexit: %04x:%08RX64 [efl=%#llx] -> %04x:%08RX32\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
4408 pVCpu->cpum.GstCtx.rflags.u, (uNewCs | 3) + 16, (uint32_t)pVCpu->cpum.GstCtx.edx));
4409 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.edx;
4410 pVCpu->cpum.GstCtx.rsp = pVCpu->cpum.GstCtx.ecx;
4411 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4412 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_ER_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4413 pVCpu->cpum.GstCtx.cs.Sel = (uNewCs | 3) + 16;
4414 pVCpu->cpum.GstCtx.cs.ValidSel = (uNewCs | 3) + 16;
4415 pVCpu->cpum.GstCtx.ss.Sel = (uNewCs | 3) + 24;
4416 pVCpu->cpum.GstCtx.ss.ValidSel = (uNewCs | 3) + 24;
4417
4418 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK))
4419 | (3 << IEM_F_X86_CPL_SHIFT)
4420 | IEM_F_MODE_X86_32BIT_PROT
4421 | iemCalc32BitFlatIndicatorEsDs(pVCpu);
4422 }
4423 pVCpu->cpum.GstCtx.cs.u64Base = 0;
4424 pVCpu->cpum.GstCtx.cs.u32Limit = UINT32_MAX;
4425 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4426
4427 pVCpu->cpum.GstCtx.ss.u64Base = 0;
4428 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
4429 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_P | X86DESCATTR_DT
4430 | X86DESCATTR_LIMIT_HIGH | X86_SEL_TYPE_RW_ACC | (3 << X86DESCATTR_DPL_SHIFT);
4431 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4432 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
4433
4434/** @todo single stepping */
4435
4436 /* Flush the prefetch buffer. */
4437 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr);
4438
4439 return VINF_SUCCESS;
4440}
4441
4442
4443/**
4444 * Completes a MOV SReg,XXX or POP SReg instruction.
4445 *
4446 * When not modifying SS or when we're already in an interrupt shadow we
4447 * can update RIP and finish the instruction the normal way.
4448 *
4449 * Otherwise, the MOV/POP SS interrupt shadow that we now enable will block
4450 * both TF and DBx events. The TF will be ignored while the DBx ones will
4451 * be delayed till the next instruction boundrary. For more details see
4452 * be delayed till the next instruction boundary. For more details see
4453 */
4454DECLINLINE(VBOXSTRICTRC) iemCImpl_LoadSRegFinish(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iSegReg)
4455{
4456 if (iSegReg != X86_SREG_SS || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
4457 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4458
4459 iemRegAddToRip(pVCpu, cbInstr);
4460 pVCpu->cpum.GstCtx.eflags.uBoth &= ~X86_EFL_RF; /* Shadow int isn't set and DRx is delayed, so only clear RF. */
4461 CPUMSetInInterruptShadowSs(&pVCpu->cpum.GstCtx);
4462
4463 return VINF_SUCCESS;
4464}
4465
4466
4467/**
4468 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
4469 *
4470 * @param pVCpu The cross context virtual CPU structure of the calling
4471 * thread.
4472 * @param iSegReg The segment register number (valid).
4473 * @param uSel The new selector value.
4474 */
4475static VBOXSTRICTRC iemCImpl_LoadSRegWorker(PVMCPUCC pVCpu, uint8_t iSegReg, uint16_t uSel)
4476{
4477 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4478 uint16_t *pSel = iemSRegRef(pVCpu, iSegReg);
4479 PCPUMSELREGHID pHid = iemSRegGetHid(pVCpu, iSegReg);
4480
4481 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
4482
4483 /*
4484 * Real mode and V8086 mode are easy.
4485 */
4486 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4487 {
4488 *pSel = uSel;
4489 pHid->u64Base = (uint32_t)uSel << 4;
4490 pHid->ValidSel = uSel;
4491 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4492#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
4493 /** @todo Does the CPU actually load limits and attributes in the
4494 * real/V8086 mode segment load case? It doesn't for CS in far
4495 * jumps... Affects unreal mode. */
4496 pHid->u32Limit = 0xffff;
4497 pHid->Attr.u = 0;
4498 pHid->Attr.n.u1Present = 1;
4499 pHid->Attr.n.u1DescType = 1;
4500 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
4501 ? X86_SEL_TYPE_RW
4502 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
4503#endif
4504
4505 /* Update the FLAT 32-bit mode flag, if we're in 32-bit unreal mode (unlikely): */
4506 if (RT_LIKELY(!IEM_IS_32BIT_CODE(pVCpu)))
4507 { /* likely */ }
4508 else if (uSel != 0)
4509 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
4510 else
4511 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
4512 | iemCalc32BitFlatIndicator(pVCpu);
4513 }
4514 /*
4515 * Protected / long mode - null segment.
4516 *
4517 * Check if it's a null segment selector value first, that's OK for DS, ES,
4518 * FS and GS. If not null, then we have to load and parse the descriptor.
4519 */
4520 else if (!(uSel & X86_SEL_MASK_OFF_RPL))
4521 {
4522 Assert(iSegReg != X86_SREG_CS); /** @todo testcase for \#UD on MOV CS, ax! */
4523 if (iSegReg == X86_SREG_SS)
4524 {
4525 /* In 64-bit kernel mode, the stack can be 0 because of the way
4526               interrupts are dispatched. AMD seems to have a slightly more
4527 relaxed relationship to SS.RPL than intel does. */
4528 /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
4529 if ( !IEM_IS_64BIT_CODE(pVCpu)
4530 || IEM_GET_CPL(pVCpu) > 2
4531 || ( uSel != IEM_GET_CPL(pVCpu)
4532 && !IEM_IS_GUEST_CPU_AMD(pVCpu)) )
4533 {
4534 Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
4535 return iemRaiseGeneralProtectionFault0(pVCpu);
4536 }
4537 }
4538
4539 *pSel = uSel; /* Not RPL, remember :-) */
4540 iemHlpLoadNullDataSelectorProt(pVCpu, pHid, uSel);
4541 if (iSegReg == X86_SREG_SS)
4542 pHid->Attr.u |= IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT;
4543
4544 /* This will affect the FLAT 32-bit mode flag: */
4545 if ( iSegReg < X86_SREG_FS
4546 && IEM_IS_32BIT_CODE(pVCpu))
4547 pVCpu->iem.s.fExec &= ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK;
4548 }
4549 /*
4550 * Protected / long mode.
4551 */
4552 else
4553 {
4554 /* Fetch the descriptor. */
4555 IEMSELDESC Desc;
4556 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
4557 if (rcStrict != VINF_SUCCESS)
4558 return rcStrict;
4559
4560 /* Check GPs first. */
4561 if (!Desc.Legacy.Gen.u1DescType)
4562 {
4563 Log(("load sreg %d (=%#x) - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
4564 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4565 }
4566 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
4567 {
4568 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4569 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
4570 {
4571 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
4572 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4573 }
4574 if ((uSel & X86_SEL_RPL) != IEM_GET_CPL(pVCpu))
4575 {
4576 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, IEM_GET_CPL(pVCpu)));
4577 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4578 }
4579 if (Desc.Legacy.Gen.u2Dpl != IEM_GET_CPL(pVCpu))
4580 {
4581 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
4582 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4583 }
4584 }
4585 else
4586 {
4587 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4588 {
4589 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
4590 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4591 }
4592 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4593 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4594 {
4595#if 0 /* this is what intel says. */
4596 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4597 && IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4598 {
4599 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
4600 iSegReg, uSel, (uSel & X86_SEL_RPL), IEM_GET_CPL(pVCpu), Desc.Legacy.Gen.u2Dpl));
4601 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4602 }
4603#else /* this is what makes more sense. */
4604 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4605 {
4606 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
4607 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
4608 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4609 }
4610 if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4611 {
4612 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
4613 iSegReg, uSel, IEM_GET_CPL(pVCpu), Desc.Legacy.Gen.u2Dpl));
4614 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
4615 }
4616#endif
4617 }
4618 }
4619
4620 /* Is it there? */
4621 if (!Desc.Legacy.Gen.u1Present)
4622 {
4623 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
4624 return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
4625 }
4626
4627 /* The base and limit. */
4628 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4629 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4630
4631 /*
4632 * Ok, everything checked out fine. Now set the accessed bit before
4633 * committing the result into the registers.
4634 */
4635 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4636 {
4637 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4638 if (rcStrict != VINF_SUCCESS)
4639 return rcStrict;
4640 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4641 }
4642
4643 /* commit */
4644 *pSel = uSel;
4645 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4646 pHid->u32Limit = cbLimit;
4647 pHid->u64Base = u64Base;
4648 pHid->ValidSel = uSel;
4649 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
4650
4651 /** @todo check if the hidden bits are loaded correctly for 64-bit
4652 * mode. */
4653
4654 /* This will affect the FLAT 32-bit mode flag: */
4655 if ( iSegReg < X86_SREG_FS
4656 && IEM_IS_32BIT_CODE(pVCpu))
4657 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK)
4658 | iemCalc32BitFlatIndicator(pVCpu);
4659 }
4660
4661 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pHid));
4662 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4663 return VINF_SUCCESS;
4664}
4665
4666
4667/**
4668 * Implements 'mov SReg, r/m'.
4669 *
4670 * @param iSegReg The segment register number (valid).
4671 * @param uSel The new selector value.
4672 */
4673IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
4674{
4675 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4676 if (rcStrict == VINF_SUCCESS)
4677 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4678 return rcStrict;
4679}
4680
4681
4682/**
4683 * Implements 'pop SReg'.
4684 *
4685 * @param iSegReg The segment register number (valid).
4686 * @param enmEffOpSize The effective operand size (valid).
4687 */
4688IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
4689{
4690 VBOXSTRICTRC rcStrict;
4691
4692 /*
4693 * Read the selector off the stack and join paths with mov ss, reg.
4694 */
4695 RTUINT64U TmpRsp;
4696 TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
4697 switch (enmEffOpSize)
4698 {
4699 case IEMMODE_16BIT:
4700 {
4701 uint16_t uSel;
4702 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4703 if (rcStrict == VINF_SUCCESS)
4704 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4705 break;
4706 }
4707
4708 case IEMMODE_32BIT:
4709 {
4710            /* Modern Intel CPUs only do a WORD-sized access here, as far as both
4711               segmentation and paging are concerned. So, we have to emulate
4712               this to make bs3-cpu-weird-1 happy. */
4713 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4714 {
4715 /* We don't have flexible enough stack primitives here, so just
4716 do a word pop and add two bytes to SP/RSP on success. */
4717 uint16_t uSel;
4718 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4719 if (rcStrict == VINF_SUCCESS)
4720 {
4721 iemRegAddToRspEx(pVCpu, &TmpRsp, sizeof(uint32_t) - sizeof(uint16_t));
4722 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4723 }
4724 }
4725 else
4726 {
4727 uint32_t u32Value;
4728 rcStrict = iemMemStackPopU32Ex(pVCpu, &u32Value, &TmpRsp);
4729 if (rcStrict == VINF_SUCCESS)
4730 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u32Value);
4731 }
4732 break;
4733 }
4734
4735 case IEMMODE_64BIT:
4736 {
4737            /* Like for the 32-bit case above, Intel only does a WORD access. */
4738 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4739 {
4740 uint16_t uSel;
4741 rcStrict = iemMemStackPopU16Ex(pVCpu, &uSel, &TmpRsp);
4742 if (rcStrict == VINF_SUCCESS)
4743 {
4744 iemRegAddToRspEx(pVCpu, &TmpRsp, sizeof(uint64_t) - sizeof(uint16_t));
4745 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4746 }
4747 }
4748 else
4749 {
4750 uint64_t u64Value;
4751 rcStrict = iemMemStackPopU64Ex(pVCpu, &u64Value, &TmpRsp);
4752 if (rcStrict == VINF_SUCCESS)
4753 rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, (uint16_t)u64Value);
4754 }
4755 break;
4756 }
4757 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4758 }
4759
4760 /*
4761 * If the load succeeded, commit the stack change and finish the instruction.
4762 */
4763 if (rcStrict == VINF_SUCCESS)
4764 {
4765 pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
4766 rcStrict = iemCImpl_LoadSRegFinish(pVCpu, cbInstr, iSegReg);
4767 }
4768
4769 return rcStrict;
4770}
4771
4772
4773/**
4774 * Implements lgs, lfs, les, lds & lss.
4775 */
4776IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize)
4777{
4778 /*
4779 * Use iemCImpl_LoadSRegWorker to do the tricky segment register loading.
4780 */
4781    /** @todo verify and test that mov, pop and lXs perform the segment
4782     *        register loading in the exact same way. */
4783 VBOXSTRICTRC rcStrict = iemCImpl_LoadSRegWorker(pVCpu, iSegReg, uSel);
4784 if (rcStrict == VINF_SUCCESS)
4785 {
4786 switch (enmEffOpSize)
4787 {
4788 case IEMMODE_16BIT:
4789 iemGRegStoreU16(pVCpu, iGReg, offSeg);
4790 break;
4791 case IEMMODE_32BIT:
4792 case IEMMODE_64BIT:
4793 iemGRegStoreU64(pVCpu, iGReg, offSeg);
4794 break;
4795 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4796 }
4797 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4798 }
4799 return rcStrict;
4800}
4801
4802
4803/**
4804 * Helper for VERR, VERW, LAR, and LSL that fetches the descriptor from the GDT/LDT.
4805 *
4806 * @retval VINF_SUCCESS on success.
4807 * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
4808 * @retval iemMemFetchSysU64 return value.
4809 *
4810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4811 * @param uSel The selector value.
4812 * @param fAllowSysDesc Whether system descriptors are OK or not.
4813 * @param pDesc Where to return the descriptor on success.
4814 */
4815static VBOXSTRICTRC iemCImpl_LoadDescHelper(PVMCPUCC pVCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
4816{
4817 pDesc->Long.au64[0] = 0;
4818 pDesc->Long.au64[1] = 0;
4819
4820 if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
4821 return VINF_IEM_SELECTOR_NOT_OK;
4822
4823 /* Within the table limits? */
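    /* (Descriptive note: OR'ing the selector with X86_SEL_RPL_LDT (7) gives the offset
       of the descriptor's last byte, so e.g. selector 0x28 needs a table limit of at
       least 0x2f for the whole 8-byte descriptor to fit.) */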
4824 RTGCPTR GCPtrBase;
4825 if (uSel & X86_SEL_LDT)
4826 {
4827 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
4828 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
4829 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
4830 return VINF_IEM_SELECTOR_NOT_OK;
4831 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
4832 }
4833 else
4834 {
4835 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
4836 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
4837 return VINF_IEM_SELECTOR_NOT_OK;
4838 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
4839 }
4840
4841 /* Fetch the descriptor. */
4842 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
4843 if (rcStrict != VINF_SUCCESS)
4844 return rcStrict;
4845 if (!pDesc->Legacy.Gen.u1DescType)
4846 {
4847 if (!fAllowSysDesc)
4848 return VINF_IEM_SELECTOR_NOT_OK;
4849 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4850 {
4851 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
4852 if (rcStrict != VINF_SUCCESS)
4853 return rcStrict;
4854 }
4855
4856 }
4857
4858 return VINF_SUCCESS;
4859}
4860
4861
4862/**
4863 * Implements verr (fWrite = false) and verw (fWrite = true).
4864 */
4865IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
4866{
4867 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4868
4869    /** @todo figure out whether the accessed bit is set or not. */
4870
4871 bool fAccessible = true;
4872 IEMSELDESC Desc;
4873 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, false /*fAllowSysDesc*/, &Desc);
4874 if (rcStrict == VINF_SUCCESS)
4875 {
4876 /* Check the descriptor, order doesn't matter much here. */
4877 if ( !Desc.Legacy.Gen.u1DescType
4878 || !Desc.Legacy.Gen.u1Present)
4879 fAccessible = false;
4880 else
4881 {
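            /* Descriptive note: as far as the type check goes, VERW requires a writable
               data segment (code segments are never writable), while VERR only rejects
               execute-only code segments. */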
4882 if ( fWrite
4883 ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
4884 : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4885 fAccessible = false;
4886
4887 /** @todo testcase for the conforming behavior. */
4888 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4889 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4890 {
4891 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4892 fAccessible = false;
4893 else if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4894 fAccessible = false;
4895 }
4896 }
4897
4898 }
4899 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
4900 fAccessible = false;
4901 else
4902 return rcStrict;
4903
4904 /* commit */
4905 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fAccessible;
4906
4907 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
4908}
4909
4910
4911/**
4912 * Implements LAR and LSL with 64-bit operand size.
4913 *
4914 * @returns VINF_SUCCESS.
4915 * @param pu64Dst Pointer to the destination register.
4916 * @param uSel The selector to load details for.
4917 * @param fIsLar true = LAR, false = LSL.
4918 */
4919IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar)
4920{
4921 Assert(!IEM_IS_REAL_OR_V86_MODE(pVCpu));
4922
4923    /** @todo figure out whether the accessed bit is set or not. */
4924
4925 bool fDescOk = true;
4926 IEMSELDESC Desc;
4927 VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pVCpu, uSel, true /*fAllowSysDesc*/, &Desc);
4928 if (rcStrict == VINF_SUCCESS)
4929 {
4930 /*
4931 * Check the descriptor type.
4932 */
4933 if (!Desc.Legacy.Gen.u1DescType)
4934 {
4935 if (CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)))
4936 {
4937 if (Desc.Long.Gen.u5Zeros)
4938 fDescOk = false;
4939 else
4940 switch (Desc.Long.Gen.u4Type)
4941 {
4942 /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
4943 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
4944 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
4945 case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
4946 break;
4947 case AMD64_SEL_TYPE_SYS_CALL_GATE:
4948 fDescOk = fIsLar;
4949 break;
4950 default:
4951 fDescOk = false;
4952 break;
4953 }
4954 }
4955 else
4956 {
4957 switch (Desc.Long.Gen.u4Type)
4958 {
4959 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4960 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4961 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4962 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4963 case X86_SEL_TYPE_SYS_LDT:
4964 break;
4965 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4966 case X86_SEL_TYPE_SYS_TASK_GATE:
4967 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4968 fDescOk = fIsLar;
4969 break;
4970 default:
4971 fDescOk = false;
4972 break;
4973 }
4974 }
4975 }
4976 if (fDescOk)
4977 {
4978 /*
4979 * Check the RPL/DPL/CPL interaction..
4980 */
4981 /** @todo testcase for the conforming behavior. */
4982 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
4983 || !Desc.Legacy.Gen.u1DescType)
4984 {
4985 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
4986 fDescOk = false;
4987 else if (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl)
4988 fDescOk = false;
4989 }
4990 }
4991
4992 if (fDescOk)
4993 {
4994 /*
4995 * All fine, start committing the result.
4996 */
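            /* Descriptive note: LAR returns the attribute bytes of the descriptor (the
               second dword masked with 0x00ffff00), while LSL returns the granularity
               scaled segment limit. */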
4997 if (fIsLar)
4998 *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
4999 else
5000 *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
5001 }
5002
5003 }
5004 else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
5005 fDescOk = false;
5006 else
5007 return rcStrict;
5008
5009 /* commit flags value and advance rip. */
5010 pVCpu->cpum.GstCtx.eflags.Bits.u1ZF = fDescOk;
5011 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5012}
5013
5014
5015/**
5016 * Implements LAR and LSL with 16-bit operand size.
5017 *
5018 * @returns VINF_SUCCESS.
5019 * @param pu16Dst Pointer to the destination register.
5020 * @param uSel The selector to load details for.
5021 * @param fIsLar true = LAR, false = LSL.
5022 */
5023IEM_CIMPL_DEF_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar)
5024{
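    /* Descriptive note: defer to the 64-bit worker; only the low 16 bits of the result
       are kept here, and EFLAGS.ZF is updated by the callee. */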
5025 uint64_t u64TmpDst = *pu16Dst;
5026 IEM_CIMPL_CALL_3(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, fIsLar);
5027 *pu16Dst = u64TmpDst;
5028 return VINF_SUCCESS;
5029}
5030
5031
5032/**
5033 * Implements lgdt.
5034 *
5035 * @param iEffSeg The segment of the new gdtr contents.
5036 * @param GCPtrEffSrc The address of the new gdtr contents.
5037 * @param enmEffOpSize The effective operand size.
5038 */
5039IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5040{
5041 if (IEM_GET_CPL(pVCpu) != 0)
5042 return iemRaiseGeneralProtectionFault0(pVCpu);
5043 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5044
5045 if (!IEM_IS_IN_GUEST(pVCpu))
5046 { /* probable */ }
5047 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5048 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5049 {
5050 Log(("lgdt: Guest intercept -> VM-exit\n"));
5051 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_LGDT, cbInstr);
5052 }
5053 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_WRITES))
5054 {
5055 Log(("lgdt: Guest intercept -> #VMEXIT\n"));
5056 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5057 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5058 }
5059
5060 /*
5061 * Fetch the limit and base address.
5062 */
5063 uint16_t cbLimit;
5064 RTGCPTR GCPtrBase;
5065 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5066 if (rcStrict == VINF_SUCCESS)
5067 {
5068 if ( !IEM_IS_64BIT_CODE(pVCpu)
5069 || X86_IS_CANONICAL(GCPtrBase))
5070 {
5071 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
5072 if (rcStrict == VINF_SUCCESS)
5073 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5074 }
5075 else
5076 {
5077 Log(("iemCImpl_lgdt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5078 return iemRaiseGeneralProtectionFault0(pVCpu);
5079 }
5080 }
5081 return rcStrict;
5082}
5083
5084
5085/**
5086 * Implements sgdt.
5087 *
5088 * @param iEffSeg The segment where to store the gdtr content.
5089 * @param GCPtrEffDst The address where to store the gdtr content.
5090 */
5091IEM_CIMPL_DEF_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5092{
5093 /*
5094 * Join paths with sidt.
5095 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5096 * you really must know.
5097 */
5098 if (!IEM_IS_IN_GUEST(pVCpu))
5099 { /* probable */ }
5100 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5101 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5102 {
5103 Log(("sgdt: Guest intercept -> VM-exit\n"));
5104 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_GDTR_IDTR_ACCESS, VMXINSTRID_SGDT, cbInstr);
5105 }
5106 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_GDTR_READS))
5107 {
5108 Log(("sgdt: Guest intercept -> #VMEXIT\n"));
5109 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5110 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5111 }
5112
5113 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
5114 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt, iEffSeg, GCPtrEffDst);
5115 if (rcStrict == VINF_SUCCESS)
5116 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5117 return rcStrict;
5118}
5119
5120
5121/**
5122 * Implements lidt.
5123 *
5124 * @param iEffSeg The segment of the new idtr contents.
5125 * @param GCPtrEffSrc The address of the new idtr contents.
5126 * @param enmEffOpSize The effective operand size.
5127 */
5128IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
5129{
5130 if (IEM_GET_CPL(pVCpu) != 0)
5131 return iemRaiseGeneralProtectionFault0(pVCpu);
5132 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5133
5134 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_WRITES))
5135 { /* probable */ }
5136 else
5137 {
5138 Log(("lidt: Guest intercept -> #VMEXIT\n"));
5139 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5140 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5141 }
5142
5143 /*
5144 * Fetch the limit and base address.
5145 */
5146 uint16_t cbLimit;
5147 RTGCPTR GCPtrBase;
5148 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
5149 if (rcStrict == VINF_SUCCESS)
5150 {
5151 if ( !IEM_IS_64BIT_CODE(pVCpu)
5152 || X86_IS_CANONICAL(GCPtrBase))
5153 {
5154 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit);
5155 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5156 }
5157 else
5158 {
5159 Log(("iemCImpl_lidt: Non-canonical base %04x:%RGv\n", cbLimit, GCPtrBase));
5160 return iemRaiseGeneralProtectionFault0(pVCpu);
5161 }
5162 }
5163 return rcStrict;
5164}
5165
5166
5167/**
5168 * Implements sidt.
5169 *
5170 * @param iEffSeg The segment where to store the idtr content.
5171 * @param GCPtrEffDst The address where to store the idtr content.
5172 */
5173IEM_CIMPL_DEF_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5174{
5175 /*
5176 * Join paths with sgdt.
5177 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
5178 * you really must know.
5179 */
5180 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
5181 { /* probable */ }
5182 else
5183 {
5184 Log(("sidt: Guest intercept -> #VMEXIT\n"));
5185 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5186 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5187 }
5188
5189 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_IDTR);
5190 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.idtr.cbIdt, pVCpu->cpum.GstCtx.idtr.pIdt, iEffSeg, GCPtrEffDst);
5191 if (rcStrict == VINF_SUCCESS)
5192 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5193 return rcStrict;
5194}
5195
5196
5197/**
5198 * Implements lldt.
5199 *
5200 * @param uNewLdt The new LDT selector value.
5201 */
5202IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
5203{
5204 /*
5205 * Check preconditions.
5206 */
5207 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5208 {
5209 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
5210 return iemRaiseUndefinedOpcode(pVCpu);
5211 }
5212 if (IEM_GET_CPL(pVCpu) != 0)
5213 {
5214 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, IEM_GET_CPL(pVCpu)));
5215 return iemRaiseGeneralProtectionFault0(pVCpu);
5216 }
5217
5218 /* Nested-guest VMX intercept (SVM is after all checks). */
5219 /** @todo testcase: exit vs check order. */
5220 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5221 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5222 { /* probable */ }
5223 else
5224 {
5225 Log(("lldt: Guest intercept -> VM-exit\n"));
5226 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LLDT, cbInstr);
5227 }
5228
5229 if (uNewLdt & X86_SEL_LDT)
5230 {
5231 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
5232 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewLdt);
5233 }
5234
5235 /*
5236 * Now, loading a NULL selector is easy.
5237 */
5238 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
5239 {
5240 /* Nested-guest SVM intercept. */
5241 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5242 { /* probable */ }
5243 else
5244 {
5245 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5246 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5247 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5248 }
5249
5250 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
5251 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_LDTR;
5252 CPUMSetGuestLDTR(pVCpu, uNewLdt);
5253 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
5254 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5255 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
5256 {
5257 /* AMD-V seems to leave the base and limit alone. */
5258 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
5259 }
5260 else
5261 {
5262 /* VT-x (Intel 3960x) seems to be doing the following. */
5263 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D;
5264 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
5265 pVCpu->cpum.GstCtx.ldtr.u32Limit = UINT32_MAX;
5266 }
5267
5268 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5269 }
5270
5271 /*
5272 * Read the descriptor.
5273 */
5274 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR);
5275 IEMSELDESC Desc;
5276 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
5277 if (rcStrict != VINF_SUCCESS)
5278 return rcStrict;
5279
5280 /* Check GPs first. */
5281 if (Desc.Legacy.Gen.u1DescType)
5282 {
5283 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5284 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5285 }
5286 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
5287 {
5288 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
5289 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5290 }
5291 uint64_t u64Base;
5292 if (!IEM_IS_LONG_MODE(pVCpu))
5293 u64Base = X86DESC_BASE(&Desc.Legacy);
5294 else
5295 {
5296 if (Desc.Long.Gen.u5Zeros)
5297 {
5298 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
5299 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5300 }
5301
5302 u64Base = X86DESC64_BASE(&Desc.Long);
5303 if (!IEM_IS_CANONICAL(u64Base))
5304 {
5305 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
5306 return iemRaiseGeneralProtectionFault(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5307 }
5308 }
5309
5310 /* NP */
5311 if (!Desc.Legacy.Gen.u1Present)
5312 {
5313 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
5314 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewLdt);
5315 }
5316
5317 /* Nested-guest SVM intercept. */
5318 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_LDTR_WRITES))
5319 { /* probable */ }
5320 else
5321 {
5322 Log(("lldt: Guest intercept -> #VMEXIT\n"));
5323 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5324 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5325 }
5326
5327 /*
5328 * It checks out alright, update the registers.
5329 */
5330/** @todo check if the actual value is loaded or if the RPL is dropped */
5331 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
5332 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
5333 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
5334 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5335 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5336 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
5337
5338 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5339}
5340
5341
5342/**
5343 * Implements sldt GReg
5344 *
5345 * @param iGReg The general register to store the LDTR selector in.
5346 * @param enmEffOpSize The operand size.
5347 */
5348IEM_CIMPL_DEF_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5349{
5350 if (!IEM_IS_IN_GUEST(pVCpu))
5351 { /* probable */ }
5352 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5353 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5354 {
5355 Log(("sldt: Guest intercept -> VM-exit\n"));
5356 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_SLDT, cbInstr);
5357 }
5358 else
5359 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0, cbInstr);
5360
5361 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5362 switch (enmEffOpSize)
5363 {
5364 case IEMMODE_16BIT:
5365 iemGRegStoreU16(pVCpu, iGReg, pVCpu->cpum.GstCtx.ldtr.Sel);
5366 break;
5367 case IEMMODE_32BIT:
5368 case IEMMODE_64BIT:
5369 iemGRegStoreU64(pVCpu, iGReg, pVCpu->cpum.GstCtx.ldtr.Sel);
5370 break;
5371 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5372 }
5373 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5374}
5375
5376
5377/**
5378 * Implements sldt mem.
5379 *
5380 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5381 * @param GCPtrEffDst Where to store the LDTR selector.
5382 */
5383IEM_CIMPL_DEF_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5384{
5385 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0, cbInstr);
5386
5387 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);
5388 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.ldtr.Sel);
5389 if (rcStrict == VINF_SUCCESS)
5390 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5391 return rcStrict;
5392}
5393
5394
5395/**
5396 * Implements ltr.
5397 *
5398 * @param uNewTr The new TSS selector value.
5399 */
5400IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
5401{
5402 /*
5403 * Check preconditions.
5404 */
5405 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
5406 {
5407 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
5408 return iemRaiseUndefinedOpcode(pVCpu);
5409 }
5410 if (IEM_GET_CPL(pVCpu) != 0)
5411 {
5412 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, IEM_GET_CPL(pVCpu)));
5413 return iemRaiseGeneralProtectionFault0(pVCpu);
5414 }
5415 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5416 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5417 { /* probable */ }
5418 else
5419 {
5420 Log(("ltr: Guest intercept -> VM-exit\n"));
5421 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_LTR, cbInstr);
5422 }
5423 if (uNewTr & X86_SEL_LDT)
5424 {
5425 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
5426 return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewTr);
5427 }
5428 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
5429 {
5430 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
5431 return iemRaiseGeneralProtectionFault0(pVCpu);
5432 }
5433 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TR_WRITES))
5434 { /* probable */ }
5435 else
5436 {
5437 Log(("ltr: Guest intercept -> #VMEXIT\n"));
5438 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5439 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5440 }
5441
5442 /*
5443 * Read the descriptor.
5444 */
5445 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_TR);
5446 IEMSELDESC Desc;
5447 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
5448 if (rcStrict != VINF_SUCCESS)
5449 return rcStrict;
5450
5451 /* Check GPs first. */
5452 if (Desc.Legacy.Gen.u1DescType)
5453 {
5454 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5455 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5456 }
5457 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
5458 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5459 || IEM_IS_LONG_MODE(pVCpu)) )
5460 {
5461 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
5462 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5463 }
5464 uint64_t u64Base;
5465 if (!IEM_IS_LONG_MODE(pVCpu))
5466 u64Base = X86DESC_BASE(&Desc.Legacy);
5467 else
5468 {
5469 if (Desc.Long.Gen.u5Zeros)
5470 {
5471 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
5472 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5473 }
5474
5475 u64Base = X86DESC64_BASE(&Desc.Long);
5476 if (!IEM_IS_CANONICAL(u64Base))
5477 {
5478 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
5479 return iemRaiseGeneralProtectionFault(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5480 }
5481 }
5482
5483 /* NP */
5484 if (!Desc.Legacy.Gen.u1Present)
5485 {
5486 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
5487 return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewTr);
5488 }
5489
5490 /*
5491 * Set it busy.
5492 * Note! Intel says this should lock down the whole descriptor, but we'll
5493 *       restrict ourselves to 32-bit for now due to lack of inline
5494 * assembly and such.
5495 */
5496 uint8_t bUnmapInfo;
5497 void *pvDesc;
5498 rcStrict = iemMemMap(pVCpu, &pvDesc, &bUnmapInfo, 8, UINT8_MAX,
5499 pVCpu->cpum.GstCtx.gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW, 0);
5500 if (rcStrict != VINF_SUCCESS)
5501 return rcStrict;
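    /* Descriptive note: the busy flag is bit 1 of the type field, i.e. bit 41 of the
       8-byte descriptor.  The mapping need not be dword aligned, so the byte pointer
       and the bit index are adjusted by the low two pointer bits (presumably to keep
       the atomic access aligned); e.g. an alignment remainder of 2 sets bit 41-16=25
       of the dword at pvDesc+2. */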
5502 switch ((uintptr_t)pvDesc & 3)
5503 {
5504 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
5505 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
5506 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
5507 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
5508 }
5509 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
5510 if (rcStrict != VINF_SUCCESS)
5511 return rcStrict;
5512 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
5513
5514 /*
5515 * It checks out alright, update the registers.
5516 */
5517/** @todo check if the actual value is loaded or if the RPL is dropped */
5518 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
5519 pVCpu->cpum.GstCtx.tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
5520 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
5521 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
5522 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
5523 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
5524
5525 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5526}
5527
5528
5529/**
5530 * Implements str GReg
5531 *
5532 * @param iGReg The general register to store the TR selector in.
5533 * @param enmEffOpSize The operand size.
5534 */
5535IEM_CIMPL_DEF_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5536{
5537 if (!IEM_IS_IN_GUEST(pVCpu))
5538 { /* probable */ }
5539 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5540 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5541 {
5542 Log(("str_reg: Guest intercept -> VM-exit\n"));
5543 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5544 }
5545 else
5546 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0, cbInstr);
5547
5548 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5549 switch (enmEffOpSize)
5550 {
5551 case IEMMODE_16BIT:
5552 iemGRegStoreU16(pVCpu, iGReg, pVCpu->cpum.GstCtx.tr.Sel);
5553 break;
5554 case IEMMODE_32BIT:
5555 case IEMMODE_64BIT:
5556 iemGRegStoreU64(pVCpu, iGReg, pVCpu->cpum.GstCtx.tr.Sel);
5557 break;
5558 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5559 }
5560 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5561}
5562
5563
5564/**
5565 * Implements str mem.
5566 *
5567 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5568 * @param GCPtrEffDst Where to store the TR selector.
5569 */
5570IEM_CIMPL_DEF_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5571{
5572 if (!IEM_IS_IN_GUEST(pVCpu))
5573 { /* probable */ }
5574 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5575 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_DESC_TABLE_EXIT))
5576 {
5577 Log(("str_mem: Guest intercept -> VM-exit\n"));
5578 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_LDTR_TR_ACCESS, VMXINSTRID_STR, cbInstr);
5579 }
5580 else
5581 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0, cbInstr);
5582
5583 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);
5584 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, pVCpu->cpum.GstCtx.tr.Sel);
5585 if (rcStrict == VINF_SUCCESS)
5586 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5587 return rcStrict;
5588}
5589
5590
5591/**
5592 * Implements mov GReg,CRx.
5593 *
5594 * @param iGReg The general register to store the CRx value in.
5595 * @param iCrReg The CRx register to read (valid).
5596 */
5597IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
5598{
5599 if (IEM_GET_CPL(pVCpu) != 0)
5600 return iemRaiseGeneralProtectionFault0(pVCpu);
5601 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
5602
5603 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(pVCpu, iCrReg))
5604 { /* probable */ }
5605 else
5606 {
5607 Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
5608 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5609 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
5610 }
5611
5612 /* Read it. */
5613 uint64_t crX;
5614 switch (iCrReg)
5615 {
5616 case 0:
5617 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5618 crX = pVCpu->cpum.GstCtx.cr0;
5619 if (IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_386)
5620 crX |= UINT32_C(0x7fffffe0); /* All reserved CR0 flags are set on a 386, just like MSW on 286. */
5621 break;
5622 case 2:
5623 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR2);
5624 crX = pVCpu->cpum.GstCtx.cr2;
5625 break;
5626 case 3:
5627 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5628 crX = pVCpu->cpum.GstCtx.cr3;
5629 break;
5630 case 4:
5631 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5632 crX = pVCpu->cpum.GstCtx.cr4;
5633 break;
5634 case 8:
5635 {
5636 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
5637 if (!IEM_IS_IN_GUEST(pVCpu))
5638 { /* probable */ }
5639#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5640 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5641 {
5642 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr8(pVCpu, iGReg, cbInstr);
5643 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5644 return rcStrict;
5645
5646 /*
5647 * If the Mov-from-CR8 doesn't cause a VM-exit, bits 7:4 of the VTPR is copied
5648 * to bits 0:3 of the destination operand. Bits 63:4 of the destination operand
5649 * are cleared.
5650 *
5651 * See Intel Spec. 29.3 "Virtualizing CR8-based TPR Accesses"
5652 */
5653 if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
5654 {
5655 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
5656 crX = (uTpr >> 4) & 0xf;
5657 break;
5658 }
5659 }
5660#endif
5661#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5662 else if (pVCpu->iem.s.fExec & IEM_F_X86_CTX_SVM)
5663 {
5664 PCSVMVMCBCTRL pVmcbCtrl = &pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl;
5665 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
5666 {
5667 crX = pVmcbCtrl->IntCtrl.n.u8VTPR & 0xf;
5668 break;
5669 }
5670 }
5671#endif
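            /* Descriptive note: architecturally CR8 mirrors TPR bits 7:4, so e.g. a TPR
               value of 0x90 reads back as CR8 = 9. */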
5672 uint8_t uTpr;
5673 int rc = APICGetTpr(pVCpu, &uTpr, NULL, NULL);
5674 if (RT_SUCCESS(rc))
5675 crX = uTpr >> 4;
5676 else
5677 crX = 0;
5678 break;
5679 }
5680 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
5681 }
5682
5683#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5684 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5685 { /* probable */ }
5686 else
5687 switch (iCrReg)
5688 {
5689 /* CR0/CR4 reads are subject to masking when in VMX non-root mode. */
5690 case 0: crX = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u); break;
5691 case 4: crX = CPUMGetGuestVmxMaskedCr4(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u); break;
5692 case 3:
5693 {
5694 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovFromCr3(pVCpu, iGReg, cbInstr);
5695 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5696 return rcStrict;
5697 break;
5698 }
5699 }
5700#endif
5701
5702 /* Store it. */
5703 if (IEM_IS_64BIT_CODE(pVCpu))
5704 iemGRegStoreU64(pVCpu, iGReg, crX);
5705 else
5706 iemGRegStoreU64(pVCpu, iGReg, (uint32_t)crX);
5707
5708 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5709}
5710
5711
5712/**
5713 * Implements smsw GReg.
5714 *
5715 * @param iGReg The general register to store the MSW (CR0) value in.
5716 * @param enmEffOpSize The operand size.
5717 */
5718IEM_CIMPL_DEF_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
5719{
5720 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */, cbInstr);
5721
5722#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5723 uint64_t u64MaskedCr0;
5724 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5725 u64MaskedCr0 = pVCpu->cpum.GstCtx.cr0;
5726 else
5727 u64MaskedCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5728 uint64_t const u64GuestCr0 = u64MaskedCr0;
5729#else
5730 uint64_t const u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5731#endif
5732
5733 switch (enmEffOpSize)
5734 {
5735 case IEMMODE_16BIT:
5736 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5737 iemGRegStoreU16(pVCpu, iGReg, (uint16_t)u64GuestCr0);
5738            /* Unused bits are set on 386 and older CPUs: */
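            /* (Descriptive note: the 286 MSW only implements PE/MP/EM/TS in bits 3:0,
               hence 0xfff0; the 386 also has ET in bit 4, hence 0xffe0.) */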
5739 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5740 iemGRegStoreU16(pVCpu, iGReg, (uint16_t)u64GuestCr0 | 0xffe0);
5741 else
5742 iemGRegStoreU16(pVCpu, iGReg, (uint16_t)u64GuestCr0 | 0xfff0);
5743 break;
5744
5745/** @todo testcase for bits 31:16. We're not doing that correctly. */
5746
5747 case IEMMODE_32BIT:
5748 if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5749 iemGRegStoreU32(pVCpu, iGReg, (uint32_t)u64GuestCr0);
5750 else /** @todo test this! */
5751 iemGRegStoreU32(pVCpu, iGReg, (uint32_t)u64GuestCr0 | UINT32_C(0x7fffffe0)); /* Unused bits are set on 386. */
5752 break;
5753
5754 case IEMMODE_64BIT:
5755 iemGRegStoreU64(pVCpu, iGReg, u64GuestCr0);
5756 break;
5757
5758 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5759 }
5760
5761 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5762}
5763
5764
5765/**
5766 * Implements smsw mem.
5767 *
5768 * @param iEffSeg The effective segment register to use with @a GCPtrEffDst.
5769 * @param GCPtrEffDst Where to store the 16-bit CR0 value.
5770 */
5771IEM_CIMPL_DEF_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
5772{
5773 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5774 if (!IEM_IS_IN_GUEST(pVCpu))
5775 { /* probable */ }
5776 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5777 u64GuestCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u);
5778 else
5779 IEM_SVM_CHECK_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */, cbInstr);
5780
5781 uint16_t u16Value;
5782 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
5783 u16Value = (uint16_t)u64GuestCr0;
5784 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
5785 u16Value = (uint16_t)u64GuestCr0 | 0xffe0;
5786 else
5787 u16Value = (uint16_t)u64GuestCr0 | 0xfff0;
5788
5789 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iEffSeg, GCPtrEffDst, u16Value);
5790 if (rcStrict == VINF_SUCCESS)
5791 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
5792 return rcStrict;
5793}
5794
5795
5796/**
5797 * Helper for mapping CR3 and PAE PDPEs for 'mov CRx,GReg'.
5798 */
5799#define IEM_MAP_PAE_PDPES_AT_CR3_RET(a_pVCpu, a_iCrReg, a_uCr3) \
5800 do \
5801 { \
5802 int const rcX = PGMGstMapPaePdpesAtCr3(a_pVCpu, a_uCr3); \
5803 if (RT_SUCCESS(rcX)) \
5804 { /* likely */ } \
5805 else \
5806 { \
5807 /* Either invalid PDPTEs or CR3 second-level translation failed. Raise #GP(0) either way. */ \
5808 Log(("iemCImpl_load_Cr%#x: Trying to load invalid PAE PDPEs\n", a_iCrReg)); \
5809 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
5810 } \
5811 } while (0)
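/* Usage note (descriptive): the macro above is used by the CR0, CR3 and CR4 cases below
   whenever a 'mov CRx' load leaves PAE paging enabled outside SVM nested hw-virt mode, so
   that the four PDPTEs referenced by CR3 are validated and mapped before PGM is informed
   of the change. */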
5812
5813
5814/**
5815 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
5816 *
5817 * @param iCrReg The CRx register to write (valid).
5818 * @param uNewCrX The new value.
5819 * @param enmAccessCrX The instruction that caused the CrX load.
5820 * @param iGReg The general register in case of a 'mov CRx,GReg'
5821 * instruction.
5822 */
5823IEM_CIMPL_DEF_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg)
5824{
5825 VBOXSTRICTRC rcStrict;
5826 int rc;
5827#ifndef VBOX_WITH_NESTED_HWVIRT_SVM
5828 RT_NOREF2(iGReg, enmAccessCrX);
5829#endif
5830
5831 /*
5832 * Try store it.
5833 * Unfortunately, CPUM only does a tiny bit of the work.
5834 */
5835 switch (iCrReg)
5836 {
5837 case 0:
5838 {
5839 /*
5840 * Perform checks.
5841 */
5842 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5843
5844 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr0;
5845 uint32_t const fValid = CPUMGetGuestCR0ValidMask();
5846
5847 /* ET is hardcoded on 486 and later. */
5848 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_486)
5849 uNewCrX |= X86_CR0_ET;
5850 /* The 386 and 486 didn't #GP(0) on attempting to set reserved CR0 bits. ET was settable on 386. */
5851 else if (IEM_GET_TARGET_CPU(pVCpu) == IEMTARGETCPU_486)
5852 {
5853 uNewCrX &= fValid;
5854 uNewCrX |= X86_CR0_ET;
5855 }
5856 else
5857 uNewCrX &= X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG | X86_CR0_ET;
5858
5859 /* Check for reserved bits. */
5860 if (uNewCrX & ~(uint64_t)fValid)
5861 {
5862 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
5863 return iemRaiseGeneralProtectionFault0(pVCpu);
5864 }
5865
5866 /* Check for invalid combinations. */
5867 if ( (uNewCrX & X86_CR0_PG)
5868 && !(uNewCrX & X86_CR0_PE) )
5869 {
5870 Log(("Trying to set CR0.PG without CR0.PE\n"));
5871 return iemRaiseGeneralProtectionFault0(pVCpu);
5872 }
5873
5874 if ( !(uNewCrX & X86_CR0_CD)
5875 && (uNewCrX & X86_CR0_NW) )
5876 {
5877 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
5878 return iemRaiseGeneralProtectionFault0(pVCpu);
5879 }
5880
5881 if ( !(uNewCrX & X86_CR0_PG)
5882 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE))
5883 {
5884 Log(("Trying to clear CR0.PG while leaving CR4.PCID set\n"));
5885 return iemRaiseGeneralProtectionFault0(pVCpu);
5886 }
5887
5888 /* Long mode consistency checks. */
5889 if ( (uNewCrX & X86_CR0_PG)
5890 && !(uOldCrX & X86_CR0_PG)
5891 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5892 {
5893 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE))
5894 {
5895                    Log(("Trying to enable long mode paging without CR4.PAE set\n"));
5896 return iemRaiseGeneralProtectionFault0(pVCpu);
5897 }
5898 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1Long)
5899 {
5900                    Log(("Trying to enable long mode paging with a long CS descriptor loaded.\n"));
5901 return iemRaiseGeneralProtectionFault0(pVCpu);
5902 }
5903 }
5904
5905 /** @todo testcase: what happens if we disable paging while in 64-bit code? */
5906
5907 if (!IEM_IS_IN_GUEST(pVCpu))
5908 { /* probable */ }
5909#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5910 /* Check for bits that must remain set or cleared in VMX operation,
5911 see Intel spec. 23.8 "Restrictions on VMX operation". */
5912 else if (IEM_VMX_IS_ROOT_MODE(pVCpu))
5913 {
5914 uint64_t const uCr0Fixed0 = iemVmxGetCr0Fixed0(pVCpu, IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
5915 if ((uNewCrX & uCr0Fixed0) != uCr0Fixed0)
5916 {
5917 Log(("Trying to clear reserved CR0 bits in VMX operation: NewCr0=%#llx MB1=%#llx\n", uNewCrX, uCr0Fixed0));
5918 return iemRaiseGeneralProtectionFault0(pVCpu);
5919 }
5920
5921 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
5922 if (uNewCrX & ~uCr0Fixed1)
5923 {
5924 Log(("Trying to set reserved CR0 bits in VMX operation: NewCr0=%#llx MB0=%#llx\n", uNewCrX, uCr0Fixed1));
5925 return iemRaiseGeneralProtectionFault0(pVCpu);
5926 }
5927 }
5928#endif
5929 /*
5930 * SVM nested-guest CR0 write intercepts.
5931 */
5932 else if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, iCrReg))
5933 {
5934 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
5935 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5936 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
5937 }
5938 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
5939 {
5940 /* 'lmsw' intercepts regardless of whether the TS/MP bits are actually toggled. */
5941 if ( enmAccessCrX == IEMACCESSCRX_LMSW
5942 || (uNewCrX & ~(X86_CR0_TS | X86_CR0_MP)) != (uOldCrX & ~(X86_CR0_TS | X86_CR0_MP)))
5943 {
5944 Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
5945 Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
5946 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
5947 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
5948 }
5949 }
5950
5951 /*
5952 * Change EFER.LMA if entering or leaving long mode.
5953 */
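            /* Descriptive note: LMA effectively equals (EFER.LME && CR0.PG), so toggling
               CR0.PG while LME is set is what actually enters or leaves long mode. */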
5954 uint64_t NewEFER = pVCpu->cpum.GstCtx.msrEFER;
5955 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
5956 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME) )
5957 {
5958 if (uNewCrX & X86_CR0_PG)
5959 NewEFER |= MSR_K6_EFER_LMA;
5960 else
5961 NewEFER &= ~MSR_K6_EFER_LMA;
5962
5963 CPUMSetGuestEFER(pVCpu, NewEFER);
5964 Assert(pVCpu->cpum.GstCtx.msrEFER == NewEFER);
5965 }
5966
5967 /*
5968 * Inform PGM.
5969 */
5970 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW))
5971 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE | X86_CR0_CD | X86_CR0_NW)) )
5972 {
5973 if ( enmAccessCrX != IEMACCESSCRX_MOV_CRX
5974 || !CPUMIsPaePagingEnabled(uNewCrX, pVCpu->cpum.GstCtx.cr4, NewEFER)
5975 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5976 { /* likely */ }
5977 else
5978 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
5979 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
5980 AssertRCReturn(rc, rc);
5981 /* ignore informational status codes */
5982 }
5983
5984 /*
5985 * Change CR0.
5986 */
5987 CPUMSetGuestCR0(pVCpu, uNewCrX);
5988 Assert(pVCpu->cpum.GstCtx.cr0 == uNewCrX);
5989
5990 /* Update the fExec flags if PE changed. */
5991 if ((uNewCrX ^ uOldCrX) & X86_CR0_PE)
5992 iemRecalcExecModeAndCplFlags(pVCpu);
5993
5994 /*
5995 * Inform PGM some more...
5996 */
5997 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
5998 false /* fForce */);
5999 break;
6000 }
6001
6002 /*
6003 * CR2 can be changed without any restrictions.
6004 */
6005 case 2:
6006 {
6007 if (!IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 2))
6008 { /* probable */ }
6009 else
6010 {
6011 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6012 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6013 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
6014 }
6015 pVCpu->cpum.GstCtx.cr2 = uNewCrX;
6016 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_CR2;
6017 rcStrict = VINF_SUCCESS;
6018 break;
6019 }
6020
6021 /*
6022 * CR3 is relatively simple, although AMD and Intel have different
6023     * accounts of how setting reserved bits is handled. We take Intel's
6024 * word for the lower bits and AMD's for the high bits (63:52). The
6025 * lower reserved bits are ignored and left alone; OpenBSD 5.8 relies
6026 * on this.
6027 */
6028 /** @todo Testcase: Setting reserved bits in CR3, especially before
6029 * enabling paging. */
6030 case 3:
6031 {
6032 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
6033
6034 /* Bit 63 being clear in the source operand with PCIDE indicates no invalidations are required. */
6035 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCIDE)
6036 && (uNewCrX & RT_BIT_64(63)))
6037 {
6038 /** @todo r=ramshankar: avoiding a TLB flush altogether here causes Windows 10
6039 * SMP(w/o nested-paging) to hang during bootup on Skylake systems, see
6040 * Intel spec. 4.10.4.1 "Operations that Invalidate TLBs and
6041 * Paging-Structure Caches". */
6042 uNewCrX &= ~RT_BIT_64(63);
6043 }
6044
6045 /* Check / mask the value. */
6046#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6047 /* See Intel spec. 27.2.2 "EPT Translation Mechanism" footnote. */
6048 uint64_t const fInvPhysMask = !CPUMIsGuestVmxEptPagingEnabledEx(IEM_GET_CTX(pVCpu))
6049 ? (UINT64_MAX << IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
6050 : (~X86_CR3_EPT_PAGE_MASK & X86_PAGE_4K_BASE_MASK);
6051#else
6052 uint64_t const fInvPhysMask = UINT64_C(0xfff0000000000000);
6053#endif
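            /* Illustrative note: with cMaxPhysAddrWidth = 52 the first expression above
               yields 0xfff0000000000000, matching the fixed mask used when the EPT
               define is absent. */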
6054 if (uNewCrX & fInvPhysMask)
6055 {
6056 /** @todo Should we raise this only for 64-bit mode like Intel claims? AMD is
6057 * very vague in this area. As mentioned above, need testcase on real
6058 * hardware... Sigh. */
6059 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
6060 return iemRaiseGeneralProtectionFault0(pVCpu);
6061 }
6062
6063 uint64_t fValid;
6064 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6065 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME))
6066 {
6067 /** @todo Redundant? This value has already been validated above. */
6068 fValid = UINT64_C(0x000fffffffffffff);
6069 }
6070 else
6071 fValid = UINT64_C(0xffffffff);
6072 if (uNewCrX & ~fValid)
6073 {
6074 Log(("Automatically clearing reserved MBZ bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
6075 uNewCrX, uNewCrX & ~fValid));
6076 uNewCrX &= fValid;
6077 }
6078
6079 if (!IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 3))
6080 { /* probable */ }
6081 else
6082 {
6083 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6084 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6085 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
6086 }
6087
6088 /* Inform PGM. */
6089 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG)
6090 {
6091 if ( !CPUMIsGuestInPAEModeEx(IEM_GET_CTX(pVCpu))
6092 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6093 { /* likely */ }
6094 else
6095 {
6096 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6097 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, uNewCrX);
6098 }
6099 rc = PGMFlushTLB(pVCpu, uNewCrX, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
6100 AssertRCReturn(rc, rc);
6101 /* ignore informational status codes */
6102 }
6103
6104 /* Make the change. */
6105 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
6106 AssertRCSuccessReturn(rc, rc);
6107
6108 rcStrict = VINF_SUCCESS;
6109 break;
6110 }
6111
6112 /*
6113 * CR4 is a bit more tedious as there are bits which cannot be cleared
6114 * under some circumstances and such.
6115 */
6116 case 4:
6117 {
6118 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6119 uint64_t const uOldCrX = pVCpu->cpum.GstCtx.cr4;
6120
6121 /* Reserved bits. */
6122 uint32_t const fValid = CPUMGetGuestCR4ValidMask(pVCpu->CTX_SUFF(pVM));
6123 if (uNewCrX & ~(uint64_t)fValid)
6124 {
6125 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
6126 return iemRaiseGeneralProtectionFault0(pVCpu);
6127 }
6128
6129 bool const fPcide = !(uOldCrX & X86_CR4_PCIDE) && (uNewCrX & X86_CR4_PCIDE);
6130 bool const fLongMode = CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu));
6131
6132 /* PCIDE check. */
6133 if ( fPcide
6134 && ( !fLongMode
6135 || (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))))
6136 {
6137 Log(("Trying to set PCIDE with invalid PCID or outside long mode. Pcid=%#x\n", (pVCpu->cpum.GstCtx.cr3 & UINT64_C(0xfff))));
6138 return iemRaiseGeneralProtectionFault0(pVCpu);
6139 }
6140
6141 /* PAE check. */
6142 if ( fLongMode
6143 && (uOldCrX & X86_CR4_PAE)
6144 && !(uNewCrX & X86_CR4_PAE))
6145 {
6146                Log(("Trying to clear CR4.PAE while long mode is active\n"));
6147 return iemRaiseGeneralProtectionFault0(pVCpu);
6148 }
6149
6150 if (!IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 4))
6151 { /* probable */ }
6152 else
6153 {
6154 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6155 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6156 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
6157 }
6158
6159 /* Check for bits that must remain set or cleared in VMX operation,
6160 see Intel spec. 23.8 "Restrictions on VMX operation". */
6161 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
6162 { /* probable */ }
6163 else
6164 {
6165 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
6166 if ((uNewCrX & uCr4Fixed0) != uCr4Fixed0)
6167 {
6168 Log(("Trying to clear reserved CR4 bits in VMX operation: NewCr4=%#llx MB1=%#llx\n", uNewCrX, uCr4Fixed0));
6169 return iemRaiseGeneralProtectionFault0(pVCpu);
6170 }
6171
6172 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
6173 if (uNewCrX & ~uCr4Fixed1)
6174 {
6175 Log(("Trying to set reserved CR4 bits in VMX operation: NewCr4=%#llx MB0=%#llx\n", uNewCrX, uCr4Fixed1));
6176 return iemRaiseGeneralProtectionFault0(pVCpu);
6177 }
6178 }
6179
6180 /*
6181 * Notify PGM.
6182 */
6183 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */))
6184 {
6185 if ( !CPUMIsPaePagingEnabled(pVCpu->cpum.GstCtx.cr0, uNewCrX, pVCpu->cpum.GstCtx.msrEFER)
6186 || CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6187 { /* likely */ }
6188 else
6189 {
6190 Assert(enmAccessCrX == IEMACCESSCRX_MOV_CRX);
6191 IEM_MAP_PAE_PDPES_AT_CR3_RET(pVCpu, iCrReg, pVCpu->cpum.GstCtx.cr3);
6192 }
6193 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true /* global */);
6194 AssertRCReturn(rc, rc);
6195 /* ignore informational status codes */
6196 }
6197
6198 /*
6199 * Change it.
6200 */
6201 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
6202 AssertRCSuccessReturn(rc, rc);
6203 Assert(pVCpu->cpum.GstCtx.cr4 == uNewCrX);
6204
6205 rcStrict = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
6206 false /* fForce */);
6207 break;
6208 }
6209
6210 /*
6211 * CR8 maps to the APIC TPR.
6212 */
6213 case 8:
6214 {
6215 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
6216 if (uNewCrX & ~(uint64_t)0xf)
6217 {
6218 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
6219 return iemRaiseGeneralProtectionFault0(pVCpu);
6220 }
6221
6222 if (!IEM_IS_IN_GUEST(pVCpu))
6223 { /* probable */ }
6224#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6225 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6226 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_TPR_SHADOW))
6227 {
6228 /*
6229 * If the Mov-to-CR8 doesn't cause a VM-exit, bits 0:3 of the source operand
6230 * is copied to bits 7:4 of the VTPR. Bits 0:3 and bits 31:8 of the VTPR are
6231 * cleared. Following this the processor performs TPR virtualization.
6232 *
6233 * However, we should not perform TPR virtualization immediately here but
6234 * after this instruction has completed.
6235 *
6236 * See Intel spec. 29.3 "Virtualizing CR8-based TPR Accesses"
6237 * See Intel spec. 27.1 "Architectural State Before A VM-exit"
6238 */
6239 uint32_t const uTpr = (uNewCrX & 0xf) << 4;
6240 Log(("iemCImpl_load_Cr%#x: Virtualizing TPR (%#x) write\n", iCrReg, uTpr));
6241 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
6242 iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR);
6243 rcStrict = VINF_SUCCESS;
6244 break;
6245 }
6246#endif
6247#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6248 else if (pVCpu->iem.s.fExec & IEM_F_X86_CTX_SVM)
6249 {
6250 if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, /*cr*/ 8))
6251 {
6252 Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
6253 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6254 IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
6255 }
6256
6257 pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u8VTPR = uNewCrX;
6258 if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, IEM_GET_CTX(pVCpu)))
6259 {
6260 rcStrict = VINF_SUCCESS;
6261 break;
6262 }
6263 }
6264#endif
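/* The 4-bit CR8 value maps to bits 7:4 of the APIC TPR; the low nibble ends up zero. */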
6265 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4;
6266 APICSetTpr(pVCpu, u8Tpr);
6267 rcStrict = VINF_SUCCESS;
6268 break;
6269 }
6270
6271 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* caller checks */
6272 }
6273
6274 /*
6275 * Advance the RIP on success.
6276 */
6277 if (RT_SUCCESS(rcStrict))
6278 {
6279 if (rcStrict != VINF_SUCCESS)
6280 iemSetPassUpStatus(pVCpu, rcStrict);
6281 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6282 }
6283
6284 return rcStrict;
6285}
6286
6287
6288/**
6289 * Implements mov CRx,GReg.
6290 *
6291 * @param iCrReg The CRx register to write (valid).
6292 * @param iGReg The general register to load the CRx value from.
6293 */
6294IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
6295{
6296 if (IEM_GET_CPL(pVCpu) != 0)
6297 return iemRaiseGeneralProtectionFault0(pVCpu);
6298 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6299
6300 /*
6301 * Read the new value from the source register and call common worker.
6302 */
6303 uint64_t uNewCrX;
6304 if (IEM_IS_64BIT_CODE(pVCpu))
6305 uNewCrX = iemGRegFetchU64(pVCpu, iGReg);
6306 else
6307 uNewCrX = iemGRegFetchU32(pVCpu, iGReg);
6308
6309#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6310 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6311 { /* probable */ }
6312 else
6313 {
6314 VBOXSTRICTRC rcStrict = VINF_VMX_INTERCEPT_NOT_ACTIVE;
6315 switch (iCrReg)
6316 {
6317 case 0:
6318 case 4: rcStrict = iemVmxVmexitInstrMovToCr0Cr4(pVCpu, iCrReg, &uNewCrX, iGReg, cbInstr); break;
6319 case 3: rcStrict = iemVmxVmexitInstrMovToCr3(pVCpu, uNewCrX, iGReg, cbInstr); break;
6320 case 8: rcStrict = iemVmxVmexitInstrMovToCr8(pVCpu, iGReg, cbInstr); break;
6321 }
6322 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6323 return rcStrict;
6324 }
6325#endif
6326
6327 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, iCrReg, uNewCrX, IEMACCESSCRX_MOV_CRX, iGReg);
6328}
6329
6330
6331/**
6332 * Implements 'LMSW r/m16'
6333 *
6334 * @param u16NewMsw The new value.
6335 * @param GCPtrEffDst The guest-linear address of the source operand in case
6336 * of a memory operand. For register operand, pass
6337 * NIL_RTGCPTR.
6338 */
6339IEM_CIMPL_DEF_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst)
6340{
6341 if (IEM_GET_CPL(pVCpu) != 0)
6342 return iemRaiseGeneralProtectionFault0(pVCpu);
6343 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6344 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6345
6346#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6347 /* Check nested-guest VMX intercept and get updated MSW if there's no VM-exit. */
6348 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6349 { /* probable */ }
6350 else
6351 {
6352 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrLmsw(pVCpu, pVCpu->cpum.GstCtx.cr0, &u16NewMsw, GCPtrEffDst, cbInstr);
6353 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6354 return rcStrict;
6355 }
6356#else
6357 RT_NOREF_PV(GCPtrEffDst);
6358#endif
6359
6360 /*
6361 * Compose the new CR0 value and call common worker.
6362 */
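/* Note that CR0.PE is not masked out of the old value, so LMSW can set but never clear PE. */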
6363 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6364 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
6365 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_LMSW, UINT8_MAX /* iGReg */);
6366}
6367
6368
6369/**
6370 * Implements 'CLTS'.
6371 */
6372IEM_CIMPL_DEF_0(iemCImpl_clts)
6373{
6374 if (IEM_GET_CPL(pVCpu) != 0)
6375 return iemRaiseGeneralProtectionFault0(pVCpu);
6376
6377 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6378 uint64_t uNewCr0 = pVCpu->cpum.GstCtx.cr0;
6379 uNewCr0 &= ~X86_CR0_TS;
6380
6381#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6382 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6383 { /* probable */ }
6384 else
6385 {
6386 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrClts(pVCpu, cbInstr);
6387 if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
6388 uNewCr0 |= (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS);
6389 else if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6390 return rcStrict;
6391 }
6392#endif
6393
6394 return IEM_CIMPL_CALL_4(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0, IEMACCESSCRX_CLTS, UINT8_MAX /* iGReg */);
6395}
6396
6397
6398/**
6399 * Implements mov GReg,DRx.
6400 *
6401 * @param iGReg The general register to store the DRx value in.
6402 * @param iDrReg The DRx register to read (0-7).
6403 */
6404IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
6405{
6406#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6407 /*
6408 * Check nested-guest VMX intercept.
6409 * Unlike most other intercepts, the Mov DRx intercept takes precedence
6410 * over CPL, CR4.DE and even the DR4/DR5 checks.
6411 *
6412 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6413 */
6414 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6415 { /* probable */ }
6416 else
6417 {
6418 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_FROM_DRX, iDrReg, iGReg, cbInstr);
6419 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6420 return rcStrict;
6421 }
6422#endif
6423
6424 /*
6425 * Check preconditions.
6426 */
6427 /* Raise GPs. */
6428 if (IEM_GET_CPL(pVCpu) != 0)
6429 return iemRaiseGeneralProtectionFault0(pVCpu);
6430 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6431 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6432
6433 /** @todo \#UD outside ring-0 too? */
6434 if (iDrReg == 4 || iDrReg == 5)
6435 {
6436 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);
6437 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6438 {
6439 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
6440 return iemRaiseGeneralProtectionFault0(pVCpu);
6441 }
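/* With CR4.DE clear, DR4 and DR5 are aliases of DR6 and DR7. */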
6442 iDrReg += 2;
6443 }
6444
6445 /* Raise #DB if general access detect is enabled. */
6446 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6447 {
6448 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
6449 return iemRaiseDebugException(pVCpu);
6450 }
6451
6452 /*
6453 * Read the debug register and store it in the specified general register.
6454 */
6455 uint64_t drX;
6456 switch (iDrReg)
6457 {
6458 case 0:
6459 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6460 drX = pVCpu->cpum.GstCtx.dr[0];
6461 break;
6462 case 1:
6463 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6464 drX = pVCpu->cpum.GstCtx.dr[1];
6465 break;
6466 case 2:
6467 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6468 drX = pVCpu->cpum.GstCtx.dr[2];
6469 break;
6470 case 3:
6471 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6472 drX = pVCpu->cpum.GstCtx.dr[3];
6473 break;
6474 case 6:
6475 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6476 drX = pVCpu->cpum.GstCtx.dr[6];
6477 drX |= X86_DR6_RA1_MASK;
6478 drX &= ~X86_DR6_RAZ_MASK;
6479 break;
6480 case 7:
6481 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6482 drX = pVCpu->cpum.GstCtx.dr[7];
6483 drX |= X86_DR7_RA1_MASK;
6484 drX &= ~X86_DR7_RAZ_MASK;
6485 break;
6486 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* caller checks */
6487 }
6488
6489 /** @todo SVM nested-guest intercept for DR8-DR15? */
6490 /*
6491 * Check for any SVM nested-guest intercepts for the DRx read.
6492 */
6493 if (!IEM_SVM_IS_READ_DR_INTERCEPT_SET(pVCpu, iDrReg))
6494 { /* probable */ }
6495 else
6496 {
6497 Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
6498 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6499 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
6500 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6501 }
6502
6503 if (IEM_IS_64BIT_CODE(pVCpu))
6504 iemGRegStoreU64(pVCpu, iGReg, drX);
6505 else
6506 iemGRegStoreU32(pVCpu, iGReg, (uint32_t)drX);
6507
6508 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6509}
6510
6511
6512/**
6513 * Implements mov DRx,GReg.
6514 *
6515 * @param iDrReg The DRx register to write (valid).
6516 * @param iGReg The general register to load the DRx value from.
6517 */
6518IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
6519{
6520#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6521 /*
6522 * Check nested-guest VMX intercept.
6523 * Unlike most other intercepts, the Mov DRx intercept takes precedence
6524 * over CPL, CR4.DE and even the DR4/DR5 checks.
6525 *
6526 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
6527 */
6528 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6529 { /* probable */ }
6530 else
6531 {
6532 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrMovDrX(pVCpu, VMXINSTRID_MOV_TO_DRX, iDrReg, iGReg, cbInstr);
6533 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6534 return rcStrict;
6535 }
6536#endif
6537
6538 /*
6539 * Check preconditions.
6540 */
6541 if (IEM_GET_CPL(pVCpu) != 0)
6542 return iemRaiseGeneralProtectionFault0(pVCpu);
6543 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6544 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6545
6546 if (iDrReg == 4 || iDrReg == 5)
6547 {
6548 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR4);
6549 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE)
6550 {
6551 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
6552 return iemRaiseGeneralProtectionFault0(pVCpu);
6553 }
6554 iDrReg += 2;
6555 }
6556
6557 /* Raise #DB if general access detect is enabled. */
6558 /** @todo is the \#DB for DR7.GD raised before the \#GP for reserved high
6559 * bits in DR7/DR6? */
6560 if (pVCpu->cpum.GstCtx.dr[7] & X86_DR7_GD)
6561 {
6562 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
6563 return iemRaiseDebugException(pVCpu);
6564 }
6565
6566 /*
6567 * Read the new value from the source register.
6568 */
6569 uint64_t uNewDrX;
6570 if (IEM_IS_64BIT_CODE(pVCpu))
6571 uNewDrX = iemGRegFetchU64(pVCpu, iGReg);
6572 else
6573 uNewDrX = iemGRegFetchU32(pVCpu, iGReg);
6574
6575 /*
6576 * Adjust it.
6577 */
6578 switch (iDrReg)
6579 {
6580 case 0:
6581 case 1:
6582 case 2:
6583 case 3:
6584 /* nothing to adjust */
6585 break;
6586
6587 case 6:
6588 if (uNewDrX & X86_DR6_MBZ_MASK)
6589 {
6590 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6591 return iemRaiseGeneralProtectionFault0(pVCpu);
6592 }
6593 uNewDrX |= X86_DR6_RA1_MASK;
6594 uNewDrX &= ~X86_DR6_RAZ_MASK;
6595 break;
6596
6597 case 7:
6598 if (uNewDrX & X86_DR7_MBZ_MASK)
6599 {
6600 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
6601 return iemRaiseGeneralProtectionFault0(pVCpu);
6602 }
6603 uNewDrX |= X86_DR7_RA1_MASK;
6604 uNewDrX &= ~X86_DR7_RAZ_MASK;
6605 break;
6606
6607 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6608 }
6609
6610 /** @todo SVM nested-guest intercept for DR8-DR15? */
6611 /*
6612 * Check for any SVM nested-guest intercepts for the DRx write.
6613 */
6614 if (!IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(pVCpu, iDrReg))
6615 { /* probable */ }
6616 else
6617 {
6618 Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
6619 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6620 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
6621 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
6622 }
6623
6624 /*
6625 * Do the actual setting.
6626 */
6627 if (iDrReg < 4)
6628 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
6629 else if (iDrReg == 6)
6630 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
6631
6632 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX);
6633 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc);
6634
6635 /*
6636 * Re-init hardware breakpoint summary if it was DR7 that got changed.
6637 */
6638 if (iDrReg == 7)
6639 iemRecalcExecDbgFlags(pVCpu);
6640
6641 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6642}
6643
6644
6645/**
6646 * Implements mov GReg,TRx.
6647 *
6648 * @param iGReg The general register to store the
6649 * TRx value in.
6650 * @param iTrReg The TRx register to read (6/7).
6651 */
6652IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg)
6653{
6654 /*
6655 * Check preconditions. NB: This instruction is 386/486 only.
6656 */
6657
6658 /* Raise GPs. */
6659 if (IEM_GET_CPL(pVCpu) != 0)
6660 return iemRaiseGeneralProtectionFault0(pVCpu);
6661 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6662
6663 if (iTrReg < 6 || iTrReg > 7)
6664 {
6665 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6666 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6667 return iemRaiseGeneralProtectionFault0(pVCpu);
6668 }
6669
6670 /*
6671 * Read the test register and store it in the specified general register.
6672 * This is currently a dummy implementation that only exists to satisfy
6673 * old debuggers like WDEB386 or OS/2 KDB which unconditionally read the
6674 * TR6/TR7 registers. Software which actually depends on the TR values
6675 * (different on 386/486) is exceedingly rare.
6676 */
6677 uint32_t trX;
6678 switch (iTrReg)
6679 {
6680 case 6:
6681 trX = 0; /* Currently a dummy. */
6682 break;
6683 case 7:
6684 trX = 0; /* Currently a dummy. */
6685 break;
6686 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* caller checks */
6687 }
6688
6689 iemGRegStoreU32(pVCpu, iGReg, trX);
6690
6691 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6692}
6693
6694
6695/**
6696 * Implements mov TRx,GReg.
6697 *
6698 * @param iTrReg The TRx register to write (valid).
6699 * @param iGReg The general register to load the TRx
6700 * value from.
6701 */
6702IEM_CIMPL_DEF_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg)
6703{
6704 /*
6705 * Check preconditions. NB: This instruction is 386/486 only.
6706 */
6707
6708 /* Raise GPs. */
6709 if (IEM_GET_CPL(pVCpu) != 0)
6710 return iemRaiseGeneralProtectionFault0(pVCpu);
6711 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6712
6713 if (iTrReg < 6 || iTrReg > 7)
6714 {
6715 /** @todo Do Intel CPUs reject this or are the TRs aliased? */
6716 Log(("mov r%u,tr%u: invalid register -> #GP(0)\n", iGReg, iTrReg));
6717 return iemRaiseGeneralProtectionFault0(pVCpu);
6718 }
6719
6720 /*
6721 * Read the new value from the source register.
6722 */
6723 uint32_t uNewTrX = iemGRegFetchU32(pVCpu, iGReg);
6724
6725 /*
6726 * Here we would do the actual setting if this weren't a dummy implementation.
6727 * It only exists to prevent old debuggers like WDEB386 or OS/2 KDB from
6728 * crashing.
6729 */
6730 RT_NOREF(uNewTrX);
6731
6732 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6733}
6734
6735
6736/**
6737 * Implements 'INVLPG m'.
6738 *
6739 * @param GCPtrPage The effective address of the page to invalidate.
6740 * @remarks Updates the RIP.
6741 */
6742IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
6743{
6744 /* ring-0 only. */
6745 if (IEM_GET_CPL(pVCpu) != 0)
6746 return iemRaiseGeneralProtectionFault0(pVCpu);
6747 Assert(!pVCpu->cpum.GstCtx.eflags.Bits.u1VM);
6748 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6749
6750 if (!IEM_IS_IN_GUEST(pVCpu))
6751 { /* probable */ }
6752#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6753 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6754 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6755 {
6756 Log(("invlpg: Guest intercept (%RGp) -> VM-exit\n", GCPtrPage));
6757 return iemVmxVmexitInstrInvlpg(pVCpu, GCPtrPage, cbInstr);
6758 }
6759#endif
6760 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPG))
6761 {
6762 Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
6763 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
6764 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPG,
6765 IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
6766 }
6767
6768 int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
6769 if (rc == VINF_SUCCESS)
6770 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6771 if (rc == VINF_PGM_SYNC_CR3)
6772 {
6773 iemSetPassUpStatus(pVCpu, rc);
6774 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6775 }
6776
6777 AssertMsg(RT_FAILURE_NP(rc), ("%Rrc\n", rc));
6778 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
6779 return rc;
6780}
6781
6782
6783/**
6784 * Implements INVPCID.
6785 *
6786 * @param iEffSeg The segment of the invpcid descriptor.
6787 * @param GCPtrInvpcidDesc The address of invpcid descriptor.
6788 * @param uInvpcidType The invalidation type.
6789 * @remarks Updates the RIP.
6790 */
6791IEM_CIMPL_DEF_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType)
6792{
6793 /*
6794 * Check preconditions.
6795 */
6796 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fInvpcid)
6797 return iemRaiseUndefinedOpcode(pVCpu);
6798
6799 /* When in VMX non-root mode and INVPCID is not enabled, it results in #UD. */
6800 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6801 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_INVPCID)))
6802 { /* likely */ }
6803 else
6804 {
6805 Log(("invpcid: Not enabled for nested-guest execution -> #UD\n"));
6806 return iemRaiseUndefinedOpcode(pVCpu);
6807 }
6808
6809 if (IEM_GET_CPL(pVCpu) != 0)
6810 {
6811 Log(("invpcid: CPL != 0 -> #GP(0)\n"));
6812 return iemRaiseGeneralProtectionFault0(pVCpu);
6813 }
6814
6815 if (IEM_IS_V86_MODE(pVCpu))
6816 {
6817 Log(("invpcid: v8086 mode -> #GP(0)\n"));
6818 return iemRaiseGeneralProtectionFault0(pVCpu);
6819 }
6820
6821 /*
6822 * Check nested-guest intercept.
6823 *
6824 * INVPCID causes a VM-exit if "enable INVPCID" and "INVLPG exiting" are
6825 * both set. We have already checked the former earlier in this function.
6826 *
6827 * CPL and virtual-8086 mode checks take priority over this VM-exit.
6828 * See Intel spec. "25.1.1 Relative Priority of Faults and VM Exits".
6829 */
6830 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6831 || !IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INVLPG_EXIT))
6832 { /* probable */ }
6833 else
6834 {
6835 Log(("invpcid: Guest intercept -> #VM-exit\n"));
6836 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_INVPCID, VMXINSTRID_NONE, cbInstr);
6837 }
6838
6839 if (uInvpcidType > X86_INVPCID_TYPE_MAX_VALID)
6840 {
6841 Log(("invpcid: invalid/unrecognized invpcid type %#RX64 -> #GP(0)\n", uInvpcidType));
6842 return iemRaiseGeneralProtectionFault0(pVCpu);
6843 }
6844 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
6845
6846 /*
6847 * Fetch the invpcid descriptor from guest memory.
6848 */
6849 RTUINT128U uDesc;
6850 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvpcidDesc);
6851 if (rcStrict == VINF_SUCCESS)
6852 {
6853 /*
6854 * Validate the descriptor.
6855 */
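/* Descriptor layout: low qword holds the PCID in bits 11:0 (bits 63:12 must be zero), high qword holds the linear address. */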
6856 if (uDesc.s.Lo > 0xfff)
6857 {
6858 Log(("invpcid: reserved bits set in invpcid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
6859 return iemRaiseGeneralProtectionFault0(pVCpu);
6860 }
6861
6862 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
6863 uint8_t const uPcid = uDesc.s.Lo & UINT64_C(0xfff);
6864 uint32_t const uCr4 = pVCpu->cpum.GstCtx.cr4;
6865 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
6866 switch (uInvpcidType)
6867 {
6868 case X86_INVPCID_TYPE_INDV_ADDR:
6869 {
6870 if (!IEM_IS_CANONICAL(GCPtrInvAddr))
6871 {
6872 Log(("invpcid: invalidation address %#RGP is not canonical -> #GP(0)\n", GCPtrInvAddr));
6873 return iemRaiseGeneralProtectionFault0(pVCpu);
6874 }
6875 if ( !(uCr4 & X86_CR4_PCIDE)
6876 && uPcid != 0)
6877 {
6878 Log(("invpcid: invalid pcid %#x\n", uPcid));
6879 return iemRaiseGeneralProtectionFault0(pVCpu);
6880 }
6881
6882 /* Invalidate mappings for the linear address tagged with PCID except global translations. */
6883 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6884 break;
6885 }
6886
6887 case X86_INVPCID_TYPE_SINGLE_CONTEXT:
6888 {
6889 if ( !(uCr4 & X86_CR4_PCIDE)
6890 && uPcid != 0)
6891 {
6892 Log(("invpcid: invalid pcid %#x\n", uPcid));
6893 return iemRaiseGeneralProtectionFault0(pVCpu);
6894 }
6895 /* Invalidate all mappings associated with PCID except global translations. */
6896 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6897 break;
6898 }
6899
6900 case X86_INVPCID_TYPE_ALL_CONTEXT_INCL_GLOBAL:
6901 {
6902 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
6903 break;
6904 }
6905
6906 case X86_INVPCID_TYPE_ALL_CONTEXT_EXCL_GLOBAL:
6907 {
6908 PGMFlushTLB(pVCpu, uCr3, false /* fGlobal */);
6909 break;
6910 }
6911 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6912 }
6913 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6914 }
6915 return rcStrict;
6916}
6917
6918
6919/**
6920 * Implements INVD.
6921 */
6922IEM_CIMPL_DEF_0(iemCImpl_invd)
6923{
6924 if (IEM_GET_CPL(pVCpu) != 0)
6925 {
6926 Log(("invd: CPL != 0 -> #GP(0)\n"));
6927 return iemRaiseGeneralProtectionFault0(pVCpu);
6928 }
6929
6930 if (!IEM_IS_IN_GUEST(pVCpu))
6931 { /* probable */ }
6932 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6933 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_INVD, cbInstr);
6934 else
6935 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0, cbInstr);
6936
6937 /* We currently take no action here. */
6938 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6939}
6940
6941
6942/**
6943 * Implements WBINVD.
6944 */
6945IEM_CIMPL_DEF_0(iemCImpl_wbinvd)
6946{
6947 if (IEM_GET_CPL(pVCpu) != 0)
6948 {
6949 Log(("wbinvd: CPL != 0 -> #GP(0)\n"));
6950 return iemRaiseGeneralProtectionFault0(pVCpu);
6951 }
6952
6953 if (!IEM_IS_IN_GUEST(pVCpu))
6954 { /* probable */ }
6955 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6956 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WBINVD, cbInstr);
6957 else
6958 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0, cbInstr);
6959
6960 /* We currently take no action here. */
6961 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
6962}
6963
6964
6965/** Opcode 0x0f 0xaa. */
6966IEM_CIMPL_DEF_0(iemCImpl_rsm)
6967{
6968 IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0, cbInstr);
6969 NOREF(cbInstr);
6970 return iemRaiseUndefinedOpcode(pVCpu);
6971}
6972
6973
6974/**
6975 * Implements RDTSC.
6976 */
6977IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
6978{
6979 /*
6980 * Check preconditions.
6981 */
6982 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fTsc)
6983 return iemRaiseUndefinedOpcode(pVCpu);
6984
6985 if (IEM_GET_CPL(pVCpu) != 0)
6986 {
6987 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6988 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
6989 {
6990 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
6991 return iemRaiseGeneralProtectionFault0(pVCpu);
6992 }
6993 }
6994
6995 if (!IEM_IS_IN_GUEST(pVCpu))
6996 { /* probable */ }
6997 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6998 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
6999 {
7000 Log(("rdtsc: Guest intercept -> VM-exit\n"));
7001 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
7002 }
7003 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSC))
7004 {
7005 Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
7006 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7007 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7008 }
7009
7010 /*
7011 * Do the job.
7012 */
7013 uint64_t uTicks = TMCpuTickGet(pVCpu);
7014#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7015 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7016#endif
7017 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7018 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7019 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX); /* For IEMExecDecodedRdtsc. */
7020 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7021}
7022
7023
7024/**
7025 * Implements RDTSCP.
7026 */
7027IEM_CIMPL_DEF_0(iemCImpl_rdtscp)
7028{
7029 /*
7030 * Check preconditions.
7031 */
7032 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fRdTscP)
7033 return iemRaiseUndefinedOpcode(pVCpu);
7034
7035 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7036 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDTSCP)))
7037 { /* likely */ }
7038 else
7039 {
7040 Log(("rdtscp: Not enabled for VMX non-root mode -> #UD\n"));
7041 return iemRaiseUndefinedOpcode(pVCpu);
7042 }
7043
7044 if (IEM_GET_CPL(pVCpu) != 0)
7045 {
7046 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7047 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_TSD)
7048 {
7049 Log(("rdtscp: CR4.TSD and CPL=%u -> #GP(0)\n", IEM_GET_CPL(pVCpu)));
7050 return iemRaiseGeneralProtectionFault0(pVCpu);
7051 }
7052 }
7053
7054 if (!IEM_IS_IN_GUEST(pVCpu))
7055 { /* probable */ }
7056 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7057 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
7058 {
7059 Log(("rdtscp: Guest intercept -> VM-exit\n"));
7060 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSCP, cbInstr);
7061 }
7062 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDTSCP))
7063 {
7064 Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
7065 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7066 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7067 }
7068
7069 /*
7070 * Do the job.
7071 * Query the MSR first in case of trips to ring-3.
7072 */
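/* RDTSCP returns the TSC in EDX:EAX and the low 32 bits of TSC_AUX in ECX. */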
7073 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TSC_AUX);
7074 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pVCpu->cpum.GstCtx.rcx);
7075 if (rcStrict == VINF_SUCCESS)
7076 {
7077 /* Low dword of the TSC_AUX msr only. */
7078 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
7079
7080 uint64_t uTicks = TMCpuTickGet(pVCpu);
7081#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
7082 uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
7083#endif
7084 pVCpu->cpum.GstCtx.rax = RT_LO_U32(uTicks);
7085 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(uTicks);
7086 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX); /* For IEMExecDecodedRdtscp. */
7087 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7088 }
7089 return rcStrict;
7090}
7091
7092
7093/**
7094 * Implements RDPMC.
7095 */
7096IEM_CIMPL_DEF_0(iemCImpl_rdpmc)
7097{
7098 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
7099
7100 if ( IEM_GET_CPL(pVCpu) != 0
7101 && !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PCE))
7102 return iemRaiseGeneralProtectionFault0(pVCpu);
7103
7104 if (!IEM_IS_IN_GUEST(pVCpu))
7105 { /* probable */ }
7106 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7107 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDPMC_EXIT))
7108 {
7109 Log(("rdpmc: Guest intercept -> VM-exit\n"));
7110 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDPMC, cbInstr);
7111 }
7112 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_RDPMC))
7113 {
7114 Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
7115 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7116 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7117 }
7118
7119 /** @todo Emulate performance counters, for now just return 0. */
7120 pVCpu->cpum.GstCtx.rax = 0;
7121 pVCpu->cpum.GstCtx.rdx = 0;
7122 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7123 /** @todo We should trigger a \#GP here if the CPU doesn't support the index in
7124 * ecx but see @bugref{3472}! */
7125
7126 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7127}
7128
7129
7130/**
7131 * Implements RDMSR.
7132 */
7133IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
7134{
7135 /*
7136 * Check preconditions.
7137 */
7138 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7139 return iemRaiseUndefinedOpcode(pVCpu);
7140 if (IEM_GET_CPL(pVCpu) != 0)
7141 return iemRaiseGeneralProtectionFault0(pVCpu);
7142
7143 /*
7144 * Check nested-guest intercepts.
7145 */
7146 if (!IEM_IS_IN_GUEST(pVCpu))
7147 { /* probable */ }
7148#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7149 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7150 {
7151 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_RDMSR, pVCpu->cpum.GstCtx.ecx))
7152 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDMSR, cbInstr);
7153 }
7154#endif
7155#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7156 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7157 {
7158 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, pVCpu->cpum.GstCtx.ecx, false /* fWrite */, cbInstr);
7159 if (rcStrict == VINF_SVM_VMEXIT)
7160 return VINF_SUCCESS;
7161 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7162 {
7163 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", pVCpu->cpum.GstCtx.ecx, VBOXSTRICTRC_VAL(rcStrict)));
7164 return rcStrict;
7165 }
7166 }
7167#endif
7168
7169 /*
7170 * Do the job.
7171 */
7172 RTUINT64U uValue;
7173 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7174 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7175
7176 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pVCpu->cpum.GstCtx.ecx, &uValue.u);
7177 if (rcStrict == VINF_SUCCESS)
7178 {
7179 pVCpu->cpum.GstCtx.rax = uValue.s.Lo;
7180 pVCpu->cpum.GstCtx.rdx = uValue.s.Hi;
7181 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
7182
7183 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7184 }
7185
7186#ifndef IN_RING3
7187 /* Deferred to ring-3. */
7188 if (rcStrict == VINF_CPUM_R3_MSR_READ)
7189 {
7190 Log(("IEM: rdmsr(%#x) -> ring-3\n", pVCpu->cpum.GstCtx.ecx));
7191 return rcStrict;
7192 }
7193#endif
7194
7195 /* Often an unimplemented MSR or MSR bit, so worth logging. */
7196 if (pVCpu->iem.s.cLogRelRdMsr < 32)
7197 {
7198 pVCpu->iem.s.cLogRelRdMsr++;
7199 LogRel(("IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7200 }
7201 else
7202 Log(( "IEM: rdmsr(%#x) -> #GP(0)\n", pVCpu->cpum.GstCtx.ecx));
7203 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7204 return iemRaiseGeneralProtectionFault0(pVCpu);
7205}
7206
7207
7208/**
7209 * Implements WRMSR.
7210 */
7211IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
7212{
7213 /*
7214 * Check preconditions.
7215 */
7216 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMsr)
7217 return iemRaiseUndefinedOpcode(pVCpu);
7218 if (IEM_GET_CPL(pVCpu) != 0)
7219 return iemRaiseGeneralProtectionFault0(pVCpu);
7220
7221 RTUINT64U uValue;
7222 uValue.s.Lo = pVCpu->cpum.GstCtx.eax;
7223 uValue.s.Hi = pVCpu->cpum.GstCtx.edx;
7224
7225 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
7226
7227 /** @todo make CPUMAllMsrs.cpp import the necessary MSR state. */
7228 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
7229
7230 /*
7231 * Check nested-guest intercepts.
7232 */
7233 if (!IEM_IS_IN_GUEST(pVCpu))
7234 { /* probable */ }
7235#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7236 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7237 {
7238 if (iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_WRMSR, idMsr))
7239 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_WRMSR, cbInstr);
7240 }
7241#endif
7242#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7243 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MSR_PROT))
7244 {
7245 VBOXSTRICTRC rcStrict = iemSvmHandleMsrIntercept(pVCpu, idMsr, true /* fWrite */, cbInstr);
7246 if (rcStrict == VINF_SVM_VMEXIT)
7247 return VINF_SUCCESS;
7248 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7249 {
7250 Log(("IEM: SVM intercepted rdmsr(%#x) failed. rc=%Rrc\n", idMsr, VBOXSTRICTRC_VAL(rcStrict)));
7251 return rcStrict;
7252 }
7253 }
7254#endif
7255
7256 /*
7257 * Do the job.
7258 */
7259 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, idMsr, uValue.u);
7260 if (rcStrict == VINF_SUCCESS)
7261 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7262
7263#ifndef IN_RING3
7264 /* Deferred to ring-3. */
7265 if (rcStrict == VINF_CPUM_R3_MSR_WRITE)
7266 {
7267 Log(("IEM: wrmsr(%#x) -> ring-3\n", idMsr));
7268 return rcStrict;
7269 }
7270#endif
7271
7272 /* Often an unimplemented MSR or MSR bit, so worth logging. */
7273 if (pVCpu->iem.s.cLogRelWrMsr < 32)
7274 {
7275 pVCpu->iem.s.cLogRelWrMsr++;
7276 LogRel(("IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7277 }
7278 else
7279 Log(( "IEM: wrmsr(%#x,%#x`%08x) -> #GP(0)\n", idMsr, uValue.s.Hi, uValue.s.Lo));
7280 AssertMsgReturn(rcStrict == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), VERR_IPE_UNEXPECTED_STATUS);
7281 return iemRaiseGeneralProtectionFault0(pVCpu);
7282}
7283
7284
7285/**
7286 * Implements 'IN eAX, port'.
7287 *
7288 * @param u16Port The source port.
7289 * @param cbReg The register size.
7290 * @param bImmAndEffAddrMode Bit 7: Whether the port was specified through an
7291 * immediate operand or the implicit DX register.
7292 * Bits 3-0: Effective address mode.
7293 */
7294IEM_CIMPL_DEF_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode)
7295{
7296 /*
7297 * CPL check
7298 */
7299 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7300 if (rcStrict != VINF_SUCCESS)
7301 return rcStrict;
7302
7303 if (!IEM_IS_IN_GUEST(pVCpu))
7304 { /* probable */ }
7305
7306 /*
7307 * Check VMX nested-guest IO intercept.
7308 */
7309#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7310 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7311 {
7312 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_IN, u16Port, RT_BOOL(bImmAndEffAddrMode & 0x80), cbReg, cbInstr);
7313 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7314 return rcStrict;
7315 }
7316#endif
7317
7318 /*
7319 * Check SVM nested-guest IO intercept.
7320 */
7321#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7322 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7323 {
7324 uint8_t cAddrSizeBits;
7325 switch (bImmAndEffAddrMode & 0xf)
7326 {
7327 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7328 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7329 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7330 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7331 }
7332 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_IN, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7333 false /* fRep */, false /* fStrIo */, cbInstr);
7334 if (rcStrict == VINF_SVM_VMEXIT)
7335 return VINF_SUCCESS;
7336 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7337 {
7338 Log(("iemCImpl_in: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7339 VBOXSTRICTRC_VAL(rcStrict)));
7340 return rcStrict;
7341 }
7342 }
7343#endif
7344#if !defined(VBOX_WITH_NESTED_HWVIRT_VMX) && !defined(VBOX_WITH_NESTED_HWVIRT_SVM)
7345 RT_NOREF(bImmAndEffAddrMode);
7346#endif
7347
7348 /*
7349 * Perform the I/O.
7350 */
7351 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7352 uint32_t u32Value = 0;
7353 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, cbReg);
7354 if (IOM_SUCCESS(rcStrict))
7355 {
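/* Store the result; the 4-byte case writes RAX and thus zero-extends into the upper half. */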
7356 switch (cbReg)
7357 {
7358 case 1: pVCpu->cpum.GstCtx.al = (uint8_t)u32Value; break;
7359 case 2: pVCpu->cpum.GstCtx.ax = (uint16_t)u32Value; break;
7360 case 4: pVCpu->cpum.GstCtx.rax = u32Value; break;
7361 default: AssertFailedReturn(VERR_IEM_IPE_3);
7362 }
7363
7364 pVCpu->iem.s.cPotentialExits++;
7365 if (rcStrict != VINF_SUCCESS)
7366 iemSetPassUpStatus(pVCpu, rcStrict);
7367
7368 /*
7369 * Check for I/O breakpoints before we complete the instruction.
7370 */
7371 uint32_t const fDr7 = pVCpu->cpum.GstCtx.dr[7];
7372 if (RT_UNLIKELY( ( ( (fDr7 & X86_DR7_ENABLED_MASK)
7373 && X86_DR7_ANY_RW_IO(fDr7)
7374 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7375 || pVM->dbgf.ro.cEnabledHwIoBreakpoints > 0)
7376 && rcStrict == VINF_SUCCESS))
7377 {
7378 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7379 pVCpu->cpum.GstCtx.eflags.uBoth |= DBGFBpCheckIo2(pVM, pVCpu, u16Port, cbReg);
7380 }
7381
7382 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7383 }
7384
7385 return rcStrict;
7386}
7387
7388
7389/**
7390 * Implements 'IN eAX, DX'.
7391 *
7392 * @param cbReg The register size.
7393 * @param enmEffAddrMode Effective address mode.
7394 */
7395IEM_CIMPL_DEF_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode)
7396{
7397 return IEM_CIMPL_CALL_3(iemCImpl_in, pVCpu->cpum.GstCtx.dx, cbReg, 0 /* fImm */ | enmEffAddrMode);
7398}
7399
7400
7401/**
7402 * Implements 'OUT port, eAX'.
7403 *
7404 * @param u16Port The destination port.
7405 * @param cbReg The register size.
7406 * @param bImmAndEffAddrMode Bit 7: Whether the port was specified through an
7407 * immediate operand or the implicit DX register.
7408 * Bits 3-0: Effective address mode.
7409 */
7410IEM_CIMPL_DEF_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode)
7411{
7412 /*
7413 * CPL check
7414 */
7415 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pVCpu, u16Port, cbReg);
7416 if (rcStrict != VINF_SUCCESS)
7417 return rcStrict;
7418
7419 if (!IEM_IS_IN_GUEST(pVCpu))
7420 { /* probable */ }
7421
7422 /*
7423 * Check VMX nested-guest I/O intercept.
7424 */
7425#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7426 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7427 {
7428 rcStrict = iemVmxVmexitInstrIo(pVCpu, VMXINSTRID_IO_OUT, u16Port, RT_BOOL(bImmAndEffAddrMode & 0x80), cbReg, cbInstr);
7429 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7430 return rcStrict;
7431 }
7432#endif
7433
7434 /*
7435 * Check SVM nested-guest I/O intercept.
7436 */
7437#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
7438 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
7439 {
7440 uint8_t cAddrSizeBits;
7441 switch (bImmAndEffAddrMode & 0xf)
7442 {
7443 case IEMMODE_16BIT: cAddrSizeBits = 16; break;
7444 case IEMMODE_32BIT: cAddrSizeBits = 32; break;
7445 case IEMMODE_64BIT: cAddrSizeBits = 64; break;
7446 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7447 }
7448 rcStrict = iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg, cAddrSizeBits, 0 /* N/A - iEffSeg */,
7449 false /* fRep */, false /* fStrIo */, cbInstr);
7450 if (rcStrict == VINF_SVM_VMEXIT)
7451 return VINF_SUCCESS;
7452 if (rcStrict != VINF_SVM_INTERCEPT_NOT_ACTIVE)
7453 {
7454 Log(("iemCImpl_out: iemSvmHandleIOIntercept failed (u16Port=%#x, cbReg=%u) rc=%Rrc\n", u16Port, cbReg,
7455 VBOXSTRICTRC_VAL(rcStrict)));
7456 return rcStrict;
7457 }
7458 }
7459#endif
7460#if !defined(VBOX_WITH_NESTED_HWVIRT_VMX) && !defined(VBOX_WITH_NESTED_HWVIRT_SVM)
7461 RT_NOREF(bImmAndEffAddrMode);
7462#endif
7463
7464 /*
7465 * Perform the I/O.
7466 */
7467 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
7468 uint32_t u32Value;
7469 switch (cbReg)
7470 {
7471 case 1: u32Value = pVCpu->cpum.GstCtx.al; break;
7472 case 2: u32Value = pVCpu->cpum.GstCtx.ax; break;
7473 case 4: u32Value = pVCpu->cpum.GstCtx.eax; break;
7474 default: AssertFailedReturn(VERR_IEM_IPE_4);
7475 }
7476 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, cbReg);
7477 if (IOM_SUCCESS(rcStrict))
7478 {
7479 pVCpu->iem.s.cPotentialExits++;
7480 if (rcStrict != VINF_SUCCESS)
7481 iemSetPassUpStatus(pVCpu, rcStrict);
7482
7483 /*
7484 * Check for I/O breakpoints before we complete the instruction.
7485 */
7486 uint32_t const fDr7 = pVCpu->cpum.GstCtx.dr[7];
7487 if (RT_UNLIKELY( ( ( (fDr7 & X86_DR7_ENABLED_MASK)
7488 && X86_DR7_ANY_RW_IO(fDr7)
7489 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_DE))
7490 || pVM->dbgf.ro.cEnabledHwIoBreakpoints > 0)
7491 && rcStrict == VINF_SUCCESS))
7492 {
7493 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR6);
7494 pVCpu->cpum.GstCtx.eflags.uBoth |= DBGFBpCheckIo2(pVM, pVCpu, u16Port, cbReg);
7495 }
7496
7497 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7498 }
7499 return rcStrict;
7500}
7501
7502
7503/**
7504 * Implements 'OUT DX, eAX'.
7505 *
7506 * @param cbReg The register size.
7507 * @param enmEffAddrMode Effective address mode.
7508 */
7509IEM_CIMPL_DEF_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode)
7510{
7511 return IEM_CIMPL_CALL_3(iemCImpl_out, pVCpu->cpum.GstCtx.dx, cbReg, 0 /* fImm */ | enmEffAddrMode);
7512}
7513
7514
7515/**
7516 * Implements 'CLI'.
7517 */
7518IEM_CIMPL_DEF_0(iemCImpl_cli)
7519{
7520 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7521#ifdef LOG_ENABLED
7522 uint32_t const fEflOld = fEfl;
7523#endif
7524
7525 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7526 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7527 {
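/* Protected mode: IF may be cleared when CPL <= IOPL; otherwise CR4.PVI (CPL 3) or CR4.VME (V8086) only allows clearing VIF, else #GP(0). */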
7528 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7529 if (!(fEfl & X86_EFL_VM))
7530 {
7531 if (IEM_GET_CPL(pVCpu) <= uIopl)
7532 fEfl &= ~X86_EFL_IF;
7533 else if ( IEM_GET_CPL(pVCpu) == 3
7534 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI) )
7535 fEfl &= ~X86_EFL_VIF;
7536 else
7537 return iemRaiseGeneralProtectionFault0(pVCpu);
7538 }
7539 /* V8086 */
7540 else if (uIopl == 3)
7541 fEfl &= ~X86_EFL_IF;
7542 else if ( uIopl < 3
7543 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME) )
7544 fEfl &= ~X86_EFL_VIF;
7545 else
7546 return iemRaiseGeneralProtectionFault0(pVCpu);
7547 }
7548 /* real mode */
7549 else
7550 fEfl &= ~X86_EFL_IF;
7551
7552 /* Commit. */
7553 IEMMISC_SET_EFL(pVCpu, fEfl);
7554 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7555 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl));
7556 return rcStrict;
7557}
7558
7559
7560/**
7561 * Implements 'STI'.
7562 */
7563IEM_CIMPL_DEF_0(iemCImpl_sti)
7564{
7565 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
7566 uint32_t const fEflOld = fEfl;
7567
7568 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
7569 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
7570 {
7571 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
7572 if (!(fEfl & X86_EFL_VM))
7573 {
7574 if (IEM_GET_CPL(pVCpu) <= uIopl)
7575 fEfl |= X86_EFL_IF;
7576 else if ( IEM_GET_CPL(pVCpu) == 3
7577 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PVI)
7578 && !(fEfl & X86_EFL_VIP) )
7579 fEfl |= X86_EFL_VIF;
7580 else
7581 return iemRaiseGeneralProtectionFault0(pVCpu);
7582 }
7583 /* V8086 */
7584 else if (uIopl == 3)
7585 fEfl |= X86_EFL_IF;
7586 else if ( uIopl < 3
7587 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VME)
7588 && !(fEfl & X86_EFL_VIP) )
7589 fEfl |= X86_EFL_VIF;
7590 else
7591 return iemRaiseGeneralProtectionFault0(pVCpu);
7592 }
7593 /* real mode */
7594 else
7595 fEfl |= X86_EFL_IF;
7596
7597 /*
7598 * Commit.
7599 *
7600 * Note! Setting the shadow interrupt flag must be done after RIP updating.
7601 */
7602 IEMMISC_SET_EFL(pVCpu, fEfl);
7603 VBOXSTRICTRC const rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7604 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF))
7605 {
7606 /** @todo only set the shadow flag if it was clear before? */
7607 CPUMSetInInterruptShadowSti(&pVCpu->cpum.GstCtx);
7608 }
7609 pVCpu->iem.s.fTbCurInstrIsSti = true;
7610 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
7611 return rcStrict;
7612}
7613
7614
7615/**
7616 * Implements 'HLT'.
7617 */
7618IEM_CIMPL_DEF_0(iemCImpl_hlt)
7619{
7620 if (IEM_GET_CPL(pVCpu) != 0)
7621 return iemRaiseGeneralProtectionFault0(pVCpu);
7622
7623 if (!IEM_IS_IN_GUEST(pVCpu))
7624 { /* probable */ }
7625 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7626 && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
7627 {
7628 Log2(("hlt: Guest intercept -> VM-exit\n"));
7629 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
7630 }
7631 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_HLT))
7632 {
7633 Log2(("hlt: Guest intercept -> #VMEXIT\n"));
7634 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7635 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7636 }
7637
7638 /** @todo finish: This ASSUMES that iemRegAddToRipAndFinishingClearingRF won't
7639 * be returning any status codes relating to non-guest events being raised, as
7640 * we'll mess up the guest HALT otherwise. */
7641 VBOXSTRICTRC rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7642 if (rcStrict == VINF_SUCCESS)
7643 rcStrict = VINF_EM_HALT;
7644 return rcStrict;
7645}
7646
7647
7648/**
7649 * Implements 'MONITOR'.
7650 */
7651IEM_CIMPL_DEF_1(iemCImpl_monitor, uint8_t, iEffSeg)
7652{
7653 /*
7654 * Permission checks.
7655 */
7656 if (IEM_GET_CPL(pVCpu) != 0)
7657 {
7658 Log2(("monitor: CPL != 0\n"));
7659 return iemRaiseUndefinedOpcode(pVCpu); /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. */
7660 }
7661 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7662 {
7663 Log2(("monitor: Not in CPUID\n"));
7664 return iemRaiseUndefinedOpcode(pVCpu);
7665 }
7666
7667 /*
7668 * Check VMX guest-intercept.
7669 * This should be considered a fault-like VM-exit.
7670 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
7671 */
7672 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7673 || !IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MONITOR_EXIT))
7674 { /* probable */ }
7675 else
7676 {
7677 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7678 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_MONITOR, cbInstr);
7679 }
7680
7681 /*
7682 * Gather the operands and validate them.
7683 */
7684 RTGCPTR GCPtrMem = IEM_IS_64BIT_CODE(pVCpu) ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
7685 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
7686 uint32_t uEdx = pVCpu->cpum.GstCtx.edx;
7687/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
7688 * \#GP first. */
7689 if (uEcx != 0)
7690 {
7691 Log2(("monitor rax=%RX64, ecx=%RX32, edx=%RX32; ECX != 0 -> #GP(0)\n", GCPtrMem, uEcx, uEdx)); NOREF(uEdx);
7692 return iemRaiseGeneralProtectionFault0(pVCpu);
7693 }
7694
7695 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
7696 if (rcStrict != VINF_SUCCESS)
7697 return rcStrict;
7698
7699 RTGCPHYS GCPhysMem;
7700 /** @todo access size */
7701 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
7702 if (rcStrict != VINF_SUCCESS)
7703 return rcStrict;
7704
7705 if (!IEM_IS_IN_GUEST(pVCpu))
7706 { /* probable */ }
7707#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7708 else if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7709 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
7710 {
7711 /*
7712 * MONITOR does not access the memory, just monitors the address. However,
7713 * if the address falls in the APIC-access page, the address monitored must
7714 * instead be the corresponding address in the virtual-APIC page.
7715 *
7716 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
7717 */
7718 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
7719 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
7720 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
7721 return rcStrict;
7722 }
7723#endif
7724 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MONITOR))
7725 {
7726 Log2(("monitor: Guest intercept -> #VMEXIT\n"));
7727 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7728 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7729 }
7730
7731 /*
7732 * Call EM to prepare the monitor/wait.
7733 */
7734 rcStrict = EMMonitorWaitPrepare(pVCpu, pVCpu->cpum.GstCtx.rax, pVCpu->cpum.GstCtx.rcx, pVCpu->cpum.GstCtx.rdx, GCPhysMem);
7735 Assert(rcStrict == VINF_SUCCESS);
7736 if (rcStrict == VINF_SUCCESS)
7737 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7738 return rcStrict;
7739}
7740
7741
7742/**
7743 * Implements 'MWAIT'.
7744 */
7745IEM_CIMPL_DEF_0(iemCImpl_mwait)
7746{
7747 /*
7748 * Permission checks.
7749 */
7750 if (IEM_GET_CPL(pVCpu) != 0)
7751 {
7752 Log2(("mwait: CPL != 0\n"));
7753 /** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
7754 * EFLAGS.VM then.) */
7755 return iemRaiseUndefinedOpcode(pVCpu);
7756 }
7757 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMonitorMWait)
7758 {
7759 Log2(("mwait: Not in CPUID\n"));
7760 return iemRaiseUndefinedOpcode(pVCpu);
7761 }
7762
7763 /* Check VMX nested-guest intercept. */
7764 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7765 || !IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_MWAIT_EXIT))
7766 { /* probable */ }
7767 else
7768 IEM_VMX_VMEXIT_MWAIT_RET(pVCpu, EMMonitorIsArmed(pVCpu), cbInstr);
7769
7770 /*
7771 * Gather the operands and validate them.
7772 */
7773 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
7774 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
7775 if (uEcx != 0)
7776 {
7777 /* Only supported extension is break on IRQ when IF=0. */
7778 if (uEcx > 1)
7779 {
7780 Log2(("mwait eax=%RX32, ecx=%RX32; ECX > 1 -> #GP(0)\n", uEax, uEcx));
7781 return iemRaiseGeneralProtectionFault0(pVCpu);
7782 }
7783 uint32_t fMWaitFeatures = 0;
7784 uint32_t uIgnore = 0;
7785 CPUMGetGuestCpuId(pVCpu, 5, 0, -1 /*f64BitMode*/, &uIgnore, &uIgnore, &fMWaitFeatures, &uIgnore);
7786 if ( (fMWaitFeatures & (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7787 != (X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0))
7788 {
7789 Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
7790 return iemRaiseGeneralProtectionFault0(pVCpu);
7791 }
7792
7793#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7794 /*
7795 * If the interrupt-window exiting control is set or a virtual interrupt is pending
7796 * for delivery, and interrupts are disabled, the processor does not enter its
7797 * MWAIT state but instead passes control to the next instruction.
7798 *
7799 * See Intel spec. 25.3 "Changes to Instruction Behavior In VMX Non-root Operation".
7800 */
7801 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7802 || pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
7803 { /* probable */ }
7804 else if ( IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_INT_WINDOW_EXIT)
7805 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
7806 /** @todo finish: revisit this after we move the interrupt-window stuff out of the
7807 * run loop and into the instruction finishing logic here. */
7808 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7809#endif
7810 }
7811
7812 /*
7813 * Check SVM nested-guest mwait intercepts.
7814 */
7815 if (!IEM_IS_IN_GUEST(pVCpu))
7816 { /* probable */ }
7817 else if ( IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT_ARMED)
7818 && EMMonitorIsArmed(pVCpu))
7819 {
7820 Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
7821 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7822 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7823 }
7824 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
7825 {
7826 Log2(("mwait: Guest intercept -> #VMEXIT\n"));
7827 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7828 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7829 }
7830
7831 /*
7832 * Call EM to prepare the monitor/wait.
7833 *
7834 * This will return VINF_EM_HALT. If the trap flag is set, we may override
7835 * it when executing iemRegAddToRipAndFinishingClearingRF, ASSUMING that
7836 * will only return guest-related events.
7837 */
7838 VBOXSTRICTRC rcStrict = EMMonitorWaitPerform(pVCpu, uEax, uEcx);
7839
7840 /** @todo finish: This needs more thinking as we should suppress internal
7841 * debugger events here, or we'll bugger up the guest state even more than we
7842 * already do around VINF_EM_HALT. */
7843 VBOXSTRICTRC rcStrict2 = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7844 if (rcStrict2 != VINF_SUCCESS)
7845 {
7846 Log2(("mwait: %Rrc (perform) -> %Rrc (finish)!\n", VBOXSTRICTRC_VAL(rcStrict), VBOXSTRICTRC_VAL(rcStrict2) ));
7847 rcStrict = rcStrict2;
7848 }
7849
7850 return rcStrict;
7851}
7852
7853
7854/**
7855 * Implements 'SWAPGS'.
7856 */
7857IEM_CIMPL_DEF_0(iemCImpl_swapgs)
7858{
7859 Assert(IEM_IS_64BIT_CODE(pVCpu)); /* Caller checks this. */
7860
7861 /*
7862 * Permission checks.
7863 */
7864 if (IEM_GET_CPL(pVCpu) != 0)
7865 {
7866 Log2(("swapgs: CPL != 0\n"));
7867 return iemRaiseUndefinedOpcode(pVCpu);
7868 }
7869
7870 /*
7871 * Do the job.
7872 */
7873 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_GS);
7874 uint64_t uOtherGsBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
7875 pVCpu->cpum.GstCtx.msrKERNELGSBASE = pVCpu->cpum.GstCtx.gs.u64Base;
7876 pVCpu->cpum.GstCtx.gs.u64Base = uOtherGsBase;
7877
7878 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
7879}
7880
7881
7882#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
7883/**
7884 * Handles a CPUID call.
7885 */
7886static VBOXSTRICTRC iemCpuIdVBoxCall(PVMCPUCC pVCpu, uint32_t iFunction,
7887 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx)
7888{
7889 switch (iFunction)
7890 {
7891 case VBOX_CPUID_FN_ID:
7892 LogFlow(("iemCpuIdVBoxCall: VBOX_CPUID_FN_ID\n"));
7893 *pEax = VBOX_CPUID_RESP_ID_EAX;
7894 *pEbx = VBOX_CPUID_RESP_ID_EBX;
7895 *pEcx = VBOX_CPUID_RESP_ID_ECX;
7896 *pEdx = VBOX_CPUID_RESP_ID_EDX;
7897 break;
7898
7899 case VBOX_CPUID_FN_LOG:
7900 {
7901 CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX | CPUMCTX_EXTRN_RSI
7902 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
7903
7904 /* Validate input. */
7905 uint32_t cchToLog = *pEdx;
7906 if (cchToLog <= _2M)
7907 {
7908 uint32_t const uLogPicker = *pEbx;
7909 if (uLogPicker <= 1)
7910 {
7911 /* Resolve the logger. */
7912 PRTLOGGER const pLogger = !uLogPicker
7913 ? RTLogDefaultInstanceEx(UINT32_MAX) : RTLogRelGetDefaultInstanceEx(UINT32_MAX);
7914 if (pLogger)
7915 {
7916 /* Copy over the data: */
7917 RTGCPTR GCPtrSrc = pVCpu->cpum.GstCtx.rsi;
7918 while (cchToLog > 0)
7919 {
7920 uint32_t cbToMap = GUEST_PAGE_SIZE - (GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
7921 if (cbToMap > cchToLog)
7922 cbToMap = cchToLog;
7923 /** @todo Extend iemMemMap to allow page-sized accesses and avoid the 7
7924 * unnecessary calls & iterations per page. */
7925 if (cbToMap > 512)
7926 cbToMap = 512;
7927 uint8_t bUnmapInfo;
7928 void *pvSrc = NULL;
7929 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvSrc, &bUnmapInfo, cbToMap,
7930 UINT8_MAX, GCPtrSrc, IEM_ACCESS_DATA_R, 0);
7931 if (rcStrict == VINF_SUCCESS)
7932 {
7933 RTLogBulkNestedWrite(pLogger, (const char *)pvSrc, cbToMap, "Gst:");
7934 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
7935 AssertRCSuccessReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
7936 }
7937 else
7938 {
7939 Log(("iemCpuIdVBoxCall: %Rrc at %RGp LB %#x\n", VBOXSTRICTRC_VAL(rcStrict), GCPtrSrc, cbToMap));
7940 return rcStrict;
7941 }
7942
7943 /* Advance. */
7944 pVCpu->cpum.GstCtx.rsi = GCPtrSrc += cbToMap;
7945 *pEdx = cchToLog -= cbToMap;
7946 }
7947 *pEax = VINF_SUCCESS;
7948 }
7949 else
7950 *pEax = (uint32_t)VERR_NOT_FOUND;
7951 }
7952 else
7953 *pEax = (uint32_t)VERR_NOT_FOUND;
7954 }
7955 else
7956 *pEax = (uint32_t)VERR_TOO_MUCH_DATA;
7957 *pEdx = VBOX_CPUID_RESP_GEN_EDX;
7958 *pEcx = VBOX_CPUID_RESP_GEN_ECX;
7959 *pEbx = VBOX_CPUID_RESP_GEN_EBX;
7960 break;
7961 }
7962
7963 default:
7964 LogFlow(("iemCpuIdVBoxCall: Invalid function %#x (%#x, %#x)\n", iFunction, *pEbx, *pEdx));
7965 *pEax = (uint32_t)VERR_INVALID_FUNCTION;
7966 *pEbx = (uint32_t)VERR_INVALID_FUNCTION;
7967 *pEcx = (uint32_t)VERR_INVALID_FUNCTION;
7968 *pEdx = (uint32_t)VERR_INVALID_FUNCTION;
7969 break;
7970 }
7971 return VINF_SUCCESS;
7972}
7973#endif /* VBOX_WITHOUT_CPUID_HOST_CALL */
7974
7975/**
7976 * Implements 'CPUID'.
7977 */
7978IEM_CIMPL_DEF_0(iemCImpl_cpuid)
7979{
7980 if (!IEM_IS_IN_GUEST(pVCpu))
7981 { /* probable */ }
7982 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7983 {
7984 Log2(("cpuid: Guest intercept -> VM-exit\n"));
7985 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
7986 }
7987 else if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
7988 {
7989 Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
7990 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
7991 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
7992 }
7993
7994
7995 uint32_t const uEax = pVCpu->cpum.GstCtx.eax;
7996 uint32_t const uEcx = pVCpu->cpum.GstCtx.ecx;
7997
7998#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
7999 /*
8000 * CPUID host call backdoor.
8001 */
8002 if ( uEax == VBOX_CPUID_REQ_EAX_FIXED
8003 && (uEcx & VBOX_CPUID_REQ_ECX_FIXED_MASK) == VBOX_CPUID_REQ_ECX_FIXED
8004 && pVCpu->CTX_SUFF(pVM)->iem.s.fCpuIdHostCall)
8005 {
8006 VBOXSTRICTRC rcStrict = iemCpuIdVBoxCall(pVCpu, uEcx & VBOX_CPUID_REQ_ECX_FN_MASK,
8007 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx,
8008 &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
8009 if (rcStrict != VINF_SUCCESS)
8010 return rcStrict;
8011 }
8012 /*
8013 * Regular CPUID.
8014 */
8015 else
8016#endif
8017 CPUMGetGuestCpuId(pVCpu, uEax, uEcx, pVCpu->cpum.GstCtx.cs.Attr.n.u1Long,
8018 &pVCpu->cpum.GstCtx.eax, &pVCpu->cpum.GstCtx.ebx, &pVCpu->cpum.GstCtx.ecx, &pVCpu->cpum.GstCtx.edx);
8019
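/* CPUID results are 32-bit; zero-extend them into the full 64-bit registers. */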
8020 pVCpu->cpum.GstCtx.rax &= UINT32_C(0xffffffff);
8021 pVCpu->cpum.GstCtx.rbx &= UINT32_C(0xffffffff);
8022 pVCpu->cpum.GstCtx.rcx &= UINT32_C(0xffffffff);
8023 pVCpu->cpum.GstCtx.rdx &= UINT32_C(0xffffffff);
8024 pVCpu->cpum.GstCtx.fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
8025
8026 pVCpu->iem.s.cPotentialExits++;
8027 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8028}
8029
8030
8031/**
8032 * Implements 'AAD'.
8033 *
8034 * @param bImm The immediate operand.
8035 */
8036IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
8037{
8038 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
8039 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
8040 pVCpu->cpum.GstCtx.ax = al;
8041 iemHlpUpdateArithEFlagsU8(pVCpu, al,
8042 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
8043 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
8044
8045 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8046}
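
/*
 * Illustration only (not used by the build): the AAD arithmetic above in isolation,
 * i.e. AL := AL + AH * imm8 and AH := 0.  Plain C sketch; the 'Example' names are
 * hypothetical and not part of IEM:
 *
 *      #include <stdint.h>
 *
 *      static uint16_t aadExample(uint16_t uOldAx, uint8_t bImm)
 *      {
 *          uint8_t const uNewAl = (uint8_t)((uint8_t)uOldAx + (uint8_t)(uOldAx >> 8) * bImm);
 *          return uNewAl;      // AH is zeroed, so the new AX equals the new AL.
 *      }
 *
 *      // aadExample(0x0307, 10) == 0x0025  (3 * 10 + 7 = 37).
 */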
8047
8048
8049/**
8050 * Implements 'AAM'.
8051 *
8052 * @param bImm The immediate operand. Cannot be 0.
8053 */
8054IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
8055{
8056 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
8057
8058 uint16_t const ax = pVCpu->cpum.GstCtx.ax;
8059 uint8_t const al = (uint8_t)ax % bImm;
8060 uint8_t const ah = (uint8_t)ax / bImm;
8061 pVCpu->cpum.GstCtx.ax = (ah << 8) + al;
8062 iemHlpUpdateArithEFlagsU8(pVCpu, al,
8063 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
8064 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
8065
8066 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8067}
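
/*
 * Illustration only (not used by the build): the AAM arithmetic above in isolation,
 * i.e. AH := AL / imm8 and AL := AL % imm8 (imm8 == 0 raises #DE in the decoder).
 * Plain C sketch with hypothetical names:
 *
 *      #include <stdint.h>
 *
 *      static uint16_t aamExample(uint16_t uOldAx, uint8_t bImm)   // bImm != 0
 *      {
 *          uint8_t const uAl = (uint8_t)uOldAx % bImm;
 *          uint8_t const uAh = (uint8_t)uOldAx / bImm;
 *          return (uint16_t)((uAh << 8) | uAl);
 *      }
 *
 *      // aamExample(0x0025, 10) == 0x0307  (37 = 3 * 10 + 7).
 */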
8068
8069
8070/**
8071 * Implements 'DAA'.
8072 */
8073IEM_CIMPL_DEF_0(iemCImpl_daa)
8074{
8075 uint8_t const al = pVCpu->cpum.GstCtx.al;
8076 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
8077
8078 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8079 || (al & 0xf) >= 10)
8080 {
8081 pVCpu->cpum.GstCtx.al = al + 6;
8082 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8083 }
8084 else
8085 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8086
8087 if (al >= 0x9a || fCarry)
8088 {
8089 pVCpu->cpum.GstCtx.al += 0x60;
8090 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8091 }
8092 else
8093 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8094
8095 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8096 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8097}
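
/*
 * Illustration only (not used by the build): the DAA adjustment above as a standalone
 * helper.  Both conditions test the *original* AL value, just like the code above.
 * Plain C sketch with hypothetical names:
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      static uint8_t daaExample(uint8_t uOldAl, bool fOldCf, bool fOldAf, bool *pfNewCf, bool *pfNewAf)
 *      {
 *          uint8_t uAl = uOldAl;
 *          *pfNewAf = fOldAf || (uOldAl & 0xf) >= 10;
 *          if (*pfNewAf)
 *              uAl = (uint8_t)(uAl + 6);
 *          *pfNewCf = uOldAl >= 0x9a || fOldCf;
 *          if (*pfNewCf)
 *              uAl = (uint8_t)(uAl + 0x60);
 *          return uAl;
 *      }
 *
 *      // daaExample(0x2e, false, false, &fCf, &fAf) == 0x34 with AF=1, CF=0.
 */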
8098
8099
8100/**
8101 * Implements 'DAS'.
8102 */
8103IEM_CIMPL_DEF_0(iemCImpl_das)
8104{
8105 uint8_t const uInputAL = pVCpu->cpum.GstCtx.al;
8106 bool const fCarry = pVCpu->cpum.GstCtx.eflags.Bits.u1CF;
8107
8108 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8109 || (uInputAL & 0xf) >= 10)
8110 {
8111 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8112 if (uInputAL < 6)
8113 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8114 pVCpu->cpum.GstCtx.al = uInputAL - 6;
8115 }
8116 else
8117 {
8118 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8119 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8120 }
8121
8122 if (uInputAL >= 0x9a || fCarry)
8123 {
8124 pVCpu->cpum.GstCtx.al -= 0x60;
8125 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8126 }
8127
8128 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8129 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8130}
8131
8132
8133/**
8134 * Implements 'AAA'.
8135 */
8136IEM_CIMPL_DEF_0(iemCImpl_aaa)
8137{
8138 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8139 {
8140 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8141 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8142 {
8143 pVCpu->cpum.GstCtx.eflags.uBoth = iemAImpl_add_u16(pVCpu->cpum.GstCtx.eflags.uBoth, &pVCpu->cpum.GstCtx.ax, 0x106);
8144 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8145 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8146 }
8147 else
8148 {
8149 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8150 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8151 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8152 }
8153 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8154 }
8155 else
8156 {
8157 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8158 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8159 {
8160 pVCpu->cpum.GstCtx.ax += UINT16_C(0x106);
8161 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8162 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8163 }
8164 else
8165 {
8166 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8167 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8168 }
8169 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8170 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8171 }
8172
8173 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8174}
8175
8176
8177/**
8178 * Implements 'AAS'.
8179 */
8180IEM_CIMPL_DEF_0(iemCImpl_aas)
8181{
8182 if (IEM_IS_GUEST_CPU_AMD(pVCpu))
8183 {
8184 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8185 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8186 {
8187 pVCpu->cpum.GstCtx.eflags.uBoth = iemAImpl_sub_u16(pVCpu->cpum.GstCtx.eflags.uBoth, &pVCpu->cpum.GstCtx.ax, 0x106);
8188 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8189 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8190 }
8191 else
8192 {
8193 iemHlpUpdateArithEFlagsU16(pVCpu, pVCpu->cpum.GstCtx.ax, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8194 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8195 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8196 }
8197 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8198 }
8199 else
8200 {
8201 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1AF
8202 || (pVCpu->cpum.GstCtx.ax & 0xf) >= 10)
8203 {
8204 pVCpu->cpum.GstCtx.ax -= UINT16_C(0x106);
8205 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 1;
8206 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 1;
8207 }
8208 else
8209 {
8210 pVCpu->cpum.GstCtx.eflags.Bits.u1AF = 0;
8211 pVCpu->cpum.GstCtx.eflags.Bits.u1CF = 0;
8212 }
8213 pVCpu->cpum.GstCtx.ax &= UINT16_C(0xff0f);
8214 iemHlpUpdateArithEFlagsU8(pVCpu, pVCpu->cpum.GstCtx.al, X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF);
8215 }
8216
8217 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8218}
8219
8220
8221/**
8222 * Implements the 16-bit version of 'BOUND'.
8223 *
8224 * @note We have separate 16-bit and 32-bit variants of this function due to
8225 *       the decoder using unsigned parameters, whereas we want signed ones to
8226 * do the job. This is significant for a recompiler.
8227 */
8228IEM_CIMPL_DEF_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound)
8229{
8230 /*
8231 * Check if the index is inside the bounds, otherwise raise #BR.
8232 */
8233 if ( idxArray >= idxLowerBound
8234 && idxArray <= idxUpperBound)
8235 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8236 return iemRaiseBoundRangeExceeded(pVCpu);
8237}
8238
8239
8240/**
8241 * Implements the 32-bit version of 'BOUND'.
8242 */
8243IEM_CIMPL_DEF_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound)
8244{
8245 /*
8246 * Check if the index is inside the bounds, otherwise raise #BR.
8247 */
8248 if ( idxArray >= idxLowerBound
8249 && idxArray <= idxUpperBound)
8250 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8251 return iemRaiseBoundRangeExceeded(pVCpu);
8252}
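
/*
 * Illustration only (not used by the build): why the decoder's unsigned operands must
 * be reinterpreted as signed here.  With unsigned compares an index of 0xffff would
 * look like it is inside a [0, 16] bound pair; reinterpreted as int16_t it is -1 and
 * correctly triggers #BR.  Minimal plain C sketch:
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      static bool boundExampleIsInRange16(uint16_t uIdxRaw, int16_t idxLower, int16_t idxUpper)
 *      {
 *          int16_t const idxArray = (int16_t)uIdxRaw;      // reinterpret, don't zero extend
 *          return idxArray >= idxLower && idxArray <= idxUpper;
 *      }
 *
 *      // boundExampleIsInRange16(0xffff, 0, 16) -> false, i.e. #BR, matching the signed check above.
 */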
8253
8254
8255
8256/*
8257 * Instantiate the various string operation combinations.
8258 */
8259#define OP_SIZE 8
8260#define ADDR_SIZE 16
8261#include "IEMAllCImplStrInstr.cpp.h"
8262#define OP_SIZE 8
8263#define ADDR_SIZE 32
8264#include "IEMAllCImplStrInstr.cpp.h"
8265#define OP_SIZE 8
8266#define ADDR_SIZE 64
8267#include "IEMAllCImplStrInstr.cpp.h"
8268
8269#define OP_SIZE 16
8270#define ADDR_SIZE 16
8271#include "IEMAllCImplStrInstr.cpp.h"
8272#define OP_SIZE 16
8273#define ADDR_SIZE 32
8274#include "IEMAllCImplStrInstr.cpp.h"
8275#define OP_SIZE 16
8276#define ADDR_SIZE 64
8277#include "IEMAllCImplStrInstr.cpp.h"
8278
8279#define OP_SIZE 32
8280#define ADDR_SIZE 16
8281#include "IEMAllCImplStrInstr.cpp.h"
8282#define OP_SIZE 32
8283#define ADDR_SIZE 32
8284#include "IEMAllCImplStrInstr.cpp.h"
8285#define OP_SIZE 32
8286#define ADDR_SIZE 64
8287#include "IEMAllCImplStrInstr.cpp.h"
8288
8289#define OP_SIZE 64
8290#define ADDR_SIZE 32
8291#include "IEMAllCImplStrInstr.cpp.h"
8292#define OP_SIZE 64
8293#define ADDR_SIZE 64
8294#include "IEMAllCImplStrInstr.cpp.h"
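
/*
 * Illustration only (not part of the build): the multiple-inclusion template pattern
 * used above, reduced to a hypothetical "memfill_template.h".  This is *not* the real
 * IEMAllCImplStrInstr.cpp.h, just the same mechanism of generating one function per
 * OP_SIZE by re-including a header which #undef's its parameters at the end (assumes
 * <stdint.h> and <stddef.h> are already included):
 *
 *      // ---- memfill_template.h (hypothetical) ----
 *      #if OP_SIZE == 8
 *      # define OP_TYPE uint8_t
 *      #elif OP_SIZE == 16
 *      # define OP_TYPE uint16_t
 *      #elif OP_SIZE == 32
 *      # define OP_TYPE uint32_t
 *      #endif
 *      #define OP_FN_EXPANDED(a_Name, a_Size)  a_Name##a_Size
 *      #define OP_FN_(a_Name, a_Size)          OP_FN_EXPANDED(a_Name, a_Size)
 *      #define OP_FN(a_Name)                   OP_FN_(a_Name, OP_SIZE)
 *
 *      static void OP_FN(memFillExampleU)(OP_TYPE *paDst, size_t cItems, OP_TYPE uValue)
 *      {
 *          for (size_t i = 0; i < cItems; i++)
 *              paDst[i] = uValue;
 *      }
 *
 *      #undef OP_TYPE
 *      #undef OP_FN
 *      #undef OP_FN_
 *      #undef OP_FN_EXPANDED
 *      #undef OP_SIZE
 *
 *      // ---- in the including .cpp, mirroring the block above ----
 *      #define OP_SIZE 8
 *      #include "memfill_template.h"       // defines memFillExampleU8
 *      #define OP_SIZE 16
 *      #include "memfill_template.h"       // defines memFillExampleU16
 */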
8295
8296
8297/**
8298 * Implements 'XGETBV'.
8299 */
8300IEM_CIMPL_DEF_0(iemCImpl_xgetbv)
8301{
8302 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
8303 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8304 {
8305 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8306 switch (uEcx)
8307 {
8308 case 0:
8309 break;
8310
8311 case 1: /** @todo Implement XCR1 support. */
8312 default:
8313 Log(("xgetbv ecx=%RX32 -> #GP(0)\n", uEcx));
8314 return iemRaiseGeneralProtectionFault0(pVCpu);
8315
8316 }
8317 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8318 pVCpu->cpum.GstCtx.rax = RT_LO_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8319 pVCpu->cpum.GstCtx.rdx = RT_HI_U32(pVCpu->cpum.GstCtx.aXcr[uEcx]);
8320
8321 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8322 }
8323 Log(("xgetbv CR4.OSXSAVE=0 -> UD\n"));
8324 return iemRaiseUndefinedOpcode(pVCpu);
8325}
8326
8327
8328/**
8329 * Implements 'XSETBV'.
8330 */
8331IEM_CIMPL_DEF_0(iemCImpl_xsetbv)
8332{
8333 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)
8334 {
8335 /** @todo explain why this happens before the CPL check. */
8336 if (!IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_XSETBV))
8337 { /* probable */ }
8338 else
8339 {
8340 Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
8341 IEM_SVM_UPDATE_NRIP(pVCpu, cbInstr);
8342 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
8343 }
8344
8345 if (IEM_GET_CPL(pVCpu) == 0)
8346 {
8347 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_XCRx);
8348
8349 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8350 { /* probable */ }
8351 else
8352 IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_XSETBV, cbInstr);
8353
8354 uint32_t uEcx = pVCpu->cpum.GstCtx.ecx;
8355 uint64_t uNewValue = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx);
8356 switch (uEcx)
8357 {
8358 case 0:
8359 {
8360 int rc = CPUMSetGuestXcr0(pVCpu, uNewValue);
8361 if (rc == VINF_SUCCESS)
8362 break;
8363 Assert(rc == VERR_CPUM_RAISE_GP_0);
8364 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8365 return iemRaiseGeneralProtectionFault0(pVCpu);
8366 }
8367
8368 case 1: /** @todo Implement XCR1 support. */
8369 default:
8370 Log(("xsetbv ecx=%RX32 (newvalue=%RX64) -> #GP(0)\n", uEcx, uNewValue));
8371 return iemRaiseGeneralProtectionFault0(pVCpu);
8372
8373 }
8374
8375 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8376 }
8377
8378 Log(("xsetbv cpl=%u -> GP(0)\n", IEM_GET_CPL(pVCpu)));
8379 return iemRaiseGeneralProtectionFault0(pVCpu);
8380 }
8381 Log(("xsetbv CR4.OSXSAVE=0 -> UD\n"));
8382 return iemRaiseUndefinedOpcode(pVCpu);
8383}
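
/*
 * Illustration only (not used by the build): a sketch of the well known architectural
 * XCR0 validity rules that sit behind the CPUMSetGuestXcr0 call above; the
 * authoritative checks live in CPUM, this just restates them.  Plain C, hypothetical
 * names:
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      static bool xsetbvValueLooksValidExample(uint32_t uEax, uint32_t uEdx, uint64_t fSupportedMask)
 *      {
 *          uint64_t const uNewXcr0 = ((uint64_t)uEdx << 32) | uEax;        // EDX:EAX, as above
 *          if (!(uNewXcr0 & UINT64_C(0x1)))                                // x87 bit must always be set
 *              return false;
 *          if (uNewXcr0 & ~fSupportedMask)                                 // unsupported bits -> #GP(0)
 *              return false;
 *          if ((uNewXcr0 & UINT64_C(0x4)) && !(uNewXcr0 & UINT64_C(0x2)))  // YMM requires SSE
 *              return false;
 *          return true;
 *      }
 */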
8384
8385#ifndef RT_ARCH_ARM64
8386# ifdef IN_RING3
8387
8388/** Argument package for iemCImpl_cmpxchg16b_fallback_rendezvous_callback. */
8389struct IEMCIMPLCX16ARGS
8390{
8391 PRTUINT128U pu128Dst;
8392 PRTUINT128U pu128RaxRdx;
8393 PRTUINT128U pu128RbxRcx;
8394 uint32_t *pEFlags;
8395# ifdef VBOX_STRICT
8396 uint32_t cCalls;
8397# endif
8398};
8399
8400/**
8401 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
8402 * Worker for iemCImpl_cmpxchg16b_fallback_rendezvous}
8403 */
8404static DECLCALLBACK(VBOXSTRICTRC) iemCImpl_cmpxchg16b_fallback_rendezvous_callback(PVM pVM, PVMCPUCC pVCpu, void *pvUser)
8405{
8406 RT_NOREF(pVM, pVCpu);
8407 struct IEMCIMPLCX16ARGS *pArgs = (struct IEMCIMPLCX16ARGS *)pvUser;
8408# ifdef VBOX_STRICT
8409 Assert(pArgs->cCalls == 0);
8410 pArgs->cCalls++;
8411# endif
8412
8413 iemAImpl_cmpxchg16b_fallback(pArgs->pu128Dst, pArgs->pu128RaxRdx, pArgs->pu128RbxRcx, pArgs->pEFlags);
8414 return VINF_SUCCESS;
8415}
8416
8417# endif /* IN_RING3 */
8418
8419/**
8420 * Implements 'CMPXCHG16B' fallback using rendezvous.
8421 */
8422IEM_CIMPL_DEF_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
8423 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo)
8424{
8425# ifdef IN_RING3
8426 struct IEMCIMPLCX16ARGS Args;
8427 Args.pu128Dst = pu128Dst;
8428 Args.pu128RaxRdx = pu128RaxRdx;
8429 Args.pu128RbxRcx = pu128RbxRcx;
8430 Args.pEFlags = pEFlags;
8431# ifdef VBOX_STRICT
8432 Args.cCalls = 0;
8433# endif
8434 VBOXSTRICTRC rcStrict = VMMR3EmtRendezvous(pVCpu->CTX_SUFF(pVM), VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE,
8435 iemCImpl_cmpxchg16b_fallback_rendezvous_callback, &Args);
8436 Assert(Args.cCalls == 1);
8437 if (rcStrict == VINF_SUCCESS)
8438 {
8439 /* Duplicated tail code. */
8440 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8441 if (rcStrict == VINF_SUCCESS)
8442 {
8443 pVCpu->cpum.GstCtx.eflags.u = *pEFlags; /* IEM_MC_COMMIT_EFLAGS */
8444 if (!(*pEFlags & X86_EFL_ZF))
8445 {
8446 pVCpu->cpum.GstCtx.rax = pu128RaxRdx->s.Lo;
8447 pVCpu->cpum.GstCtx.rdx = pu128RaxRdx->s.Hi;
8448 }
8449 rcStrict = iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8450 }
8451 }
8452 return rcStrict;
8453# else
8454 RT_NOREF(pVCpu, cbInstr, pu128Dst, pu128RaxRdx, pu128RbxRcx, pEFlags, bUnmapInfo);
8455 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; /* This should get us to ring-3 for now. Should perhaps be replaced later. */
8456# endif
8457}
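
/*
 * Illustration only (not used by the build): the core CMPXCHG16B semantics that the
 * rendezvous above makes safe to perform non-atomically -- with every EMT parked in
 * the rendezvous, no other vCPU can race the plain loads and stores.  Minimal sketch
 * with hypothetical names:
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      typedef struct UINT128EXAMPLE { uint64_t Lo, Hi; } UINT128EXAMPLE;
 *
 *      // Returns the new ZF value; only safe while all other vCPU threads are halted.
 *      static bool cmpxchg16bCoreExample(UINT128EXAMPLE *puDst, UINT128EXAMPLE *puRaxRdx,
 *                                        const UINT128EXAMPLE *puRbxRcx)
 *      {
 *          if (puDst->Lo == puRaxRdx->Lo && puDst->Hi == puRaxRdx->Hi)
 *          {
 *              *puDst = *puRbxRcx;         // equal: store RBX:RCX, ZF=1
 *              return true;
 *          }
 *          *puRaxRdx = *puDst;             // not equal: RDX:RAX := current memory value, ZF=0
 *          return false;
 *      }
 */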
8458
8459#endif /* RT_ARCH_ARM64 */
8460
8461/**
8462 * Implements 'CLFLUSH' and 'CLFLUSHOPT'.
8463 *
8464 * This is implemented in C because it triggers a load-like behaviour without
8465 * actually reading anything. Since that's not so common, it's implemented
8466 * here.
8467 *
8468 * @param iEffSeg The effective segment.
8469 * @param GCPtrEff The address of the image.
8470 */
8471IEM_CIMPL_DEF_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
8472{
8473 /*
8474 * Pretend to do a load w/o reading (see also iemCImpl_monitor and iemMemMap).
8475 */
8476 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrEff);
8477 if (rcStrict == VINF_SUCCESS)
8478 {
8479 RTGCPHYS GCPhysMem;
8480 /** @todo access size. */
8481 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
8482 if (rcStrict == VINF_SUCCESS)
8483 {
8484#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8485 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8486 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
8487 { /* probable */ }
8488 else
8489 {
8490 /*
8491 * CLFLUSH/CLFLUSHOPT does not access the memory, but flushes the cache-line
8492 * that contains the address. However, if the address falls in the APIC-access
8493 * page, the address flushed must instead be the corresponding address in the
8494 * virtual-APIC page.
8495 *
8496 * See Intel spec. 29.4.4 "Instruction-Specific Considerations".
8497 */
8498 rcStrict = iemVmxVirtApicAccessUnused(pVCpu, &GCPhysMem, 1, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA);
8499 if ( rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE
8500 && rcStrict != VINF_VMX_MODIFIES_BEHAVIOR)
8501 return rcStrict;
8502 }
8503#endif
8504 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8505 }
8506 }
8507
8508 return rcStrict;
8509}
8510
8511
8512/**
8513 * Implements 'FINIT' and 'FNINIT'.
8514 *
8515 * @param   fCheckXcpts     Whether to check for unmasked pending exceptions or
8516 * not.
8517 */
8518IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
8519{
8520 /*
8521 * Exceptions.
8522 */
8523 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
8524 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
8525 return iemRaiseDeviceNotAvailable(pVCpu);
8526
8527 iemFpuActualizeStateForChange(pVCpu);
8528 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_X87);
8529
8530 /* FINIT: Raise #MF on pending exception(s): */
8531 if (fCheckXcpts && (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))
8532 return iemRaiseMathFault(pVCpu);
8533
8534 /*
8535 * Reset the state.
8536 */
8537 PX86XSAVEAREA pXState = &pVCpu->cpum.GstCtx.XState;
8538
8539 /* Rotate the stack to account for changed TOS. */
8540 iemFpuRotateStackSetTop(&pXState->x87, 0);
8541
8542 pXState->x87.FCW = 0x37f;
8543 pXState->x87.FSW = 0;
8544 pXState->x87.FTW = 0x00; /* 0 - empty. */
8545 /** @todo Intel says the instruction and data pointers are not cleared on
8546     *        387, presume that the 8087 and 287 don't do so either. */
8547 /** @todo test this stuff. */
8548 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_386)
8549 {
8550 pXState->x87.FPUDP = 0;
8551 pXState->x87.DS = 0; //??
8552 pXState->x87.Rsrvd2 = 0;
8553 pXState->x87.FPUIP = 0;
8554 pXState->x87.CS = 0; //??
8555 pXState->x87.Rsrvd1 = 0;
8556 }
8557 pXState->x87.FOP = 0;
8558
8559 iemHlpUsedFpu(pVCpu);
8560 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8561}
8562
8563
8564/**
8565 * Implements 'FXSAVE'.
8566 *
8567 * @param iEffSeg The effective segment.
8568 * @param GCPtrEff The address of the image.
8569 * @param enmEffOpSize The operand size (only REX.W really matters).
8570 */
8571IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8572{
8573 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8574
8575 /** @todo check out bugref{1529} and AMD behaviour */
8576
8577 /*
8578 * Raise exceptions.
8579 */
8580 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8581 return iemRaiseDeviceNotAvailable(pVCpu);
8582
8583 /*
8584 * Access the memory.
8585 */
8586 uint8_t bUnmapInfo;
8587 void *pvMem512;
8588 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfo, 512,
8589 iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8590 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8591 if (rcStrict != VINF_SUCCESS)
8592 return rcStrict;
8593 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8594 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8595
8596 /*
8597 * Store the registers.
8598 */
8599    /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
8600     *        specific whether MXCSR and XMM0-XMM7 are saved. */
8601
8602 /* common for all formats */
8603 pDst->FCW = pSrc->FCW;
8604 pDst->FSW = pSrc->FSW;
8605 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8606 pDst->FOP = pSrc->FOP;
8607 pDst->MXCSR = pSrc->MXCSR;
8608 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8609 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8610 {
8611 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8612 * them for now... */
8613 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8614 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8615 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8616 pDst->aRegs[i].au32[3] = 0;
8617 }
8618
8619 /* FPU IP, CS, DP and DS. */
8620 pDst->FPUIP = pSrc->FPUIP;
8621 pDst->CS = pSrc->CS;
8622 pDst->FPUDP = pSrc->FPUDP;
8623 pDst->DS = pSrc->DS;
8624 if (enmEffOpSize == IEMMODE_64BIT)
8625 {
8626 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8627 pDst->Rsrvd1 = pSrc->Rsrvd1;
8628 pDst->Rsrvd2 = pSrc->Rsrvd2;
8629 }
8630 else
8631 {
8632 pDst->Rsrvd1 = 0;
8633 pDst->Rsrvd2 = 0;
8634 }
8635
8636 /* XMM registers. Skipped in 64-bit CPL0 if EFER.FFXSR (AMD only) is set. */
8637 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8638 || !IEM_IS_64BIT_CODE(pVCpu)
8639 || IEM_GET_CPL(pVCpu) != 0)
8640 {
8641 uint32_t cXmmRegs = IEM_IS_64BIT_CODE(pVCpu) ? 16 : 8;
8642 for (uint32_t i = 0; i < cXmmRegs; i++)
8643 pDst->aXMM[i] = pSrc->aXMM[i];
8644 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8645 * right? */
8646 }
8647
8648 /*
8649 * Commit the memory.
8650 */
8651 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8652 if (rcStrict != VINF_SUCCESS)
8653 return rcStrict;
8654
8655 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8656}
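
/*
 * Illustration only (not a VBox type): the 512 bytes written above follow the
 * architectural FXSAVE image layout.  A packed sketch with field names mirroring
 * X86FXSTATE, offsets per the legacy/compatibility format (the REX.W form replaces
 * the IP:CS / DP:DS pairs with 64-bit pointers):
 *
 *      #include <stdint.h>
 *      #pragma pack(1)
 *      typedef struct FXSAVEAREAEXAMPLE
 *      {
 *          uint16_t FCW;               // 0x000
 *          uint16_t FSW;               // 0x002
 *          uint8_t  FTW;               // 0x004  abridged tag word, one bit per register
 *          uint8_t  bRsvd;             // 0x005
 *          uint16_t FOP;               // 0x006
 *          uint32_t FPUIP;             // 0x008
 *          uint16_t CS;                // 0x00c
 *          uint16_t Rsrvd1;            // 0x00e
 *          uint32_t FPUDP;             // 0x010
 *          uint16_t DS;                // 0x014
 *          uint16_t Rsrvd2;            // 0x016
 *          uint32_t MXCSR;             // 0x018
 *          uint32_t MXCSR_MASK;        // 0x01c
 *          uint8_t  aRegs[8][16];      // 0x020  ST0..ST7, 10 bytes each + 6 reserved
 *          uint8_t  aXMM[16][16];      // 0x0a0  XMM0..XMM15 (only 8 saved outside 64-bit mode)
 *          uint8_t  abRsvd[96];        // 0x1a0  reserved/available up to 0x200
 *      } FXSAVEAREAEXAMPLE;
 *      #pragma pack()
 */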
8657
8658
8659/**
8660 * Implements 'FXRSTOR'.
8661 *
8662 * @param iEffSeg The effective segment register for @a GCPtrEff.
8663 * @param GCPtrEff The address of the image.
8664 * @param enmEffOpSize The operand size (only REX.W really matters).
8665 */
8666IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8667{
8668 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
8669
8670 /** @todo check out bugref{1529} and AMD behaviour */
8671
8672 /*
8673 * Raise exceptions.
8674 */
8675 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_TS | X86_CR0_EM))
8676 return iemRaiseDeviceNotAvailable(pVCpu);
8677
8678 /*
8679 * Access the memory.
8680 */
8681 uint8_t bUnmapInfo;
8682 void *pvMem512;
8683 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfo, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8684 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8685 if (rcStrict != VINF_SUCCESS)
8686 return rcStrict;
8687 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8688 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8689
8690 /*
8691 * Check the state for stuff which will #GP(0).
8692 */
8693 uint32_t const fMXCSR = pSrc->MXCSR;
8694 uint32_t const fMXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8695 if (fMXCSR & ~fMXCSR_MASK)
8696 {
8697 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
8698 return iemRaiseGeneralProtectionFault0(pVCpu);
8699 }
8700
8701 /*
8702 * Load the registers.
8703 */
8704    /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
8705     *        specific whether MXCSR and XMM0-XMM7 are restored according to
8706     *        Intel.
8707 * AMD says MXCSR and XMM registers are never loaded if
8708 * CR4.OSFXSR=0.
8709 */
8710
8711 /* common for all formats */
8712 pDst->FCW = pSrc->FCW;
8713 pDst->FSW = pSrc->FSW;
8714 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8715 pDst->FOP = pSrc->FOP;
8716 pDst->MXCSR = fMXCSR;
8717 /* (MXCSR_MASK is read-only) */
8718 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
8719 {
8720 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8721 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8722 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8723 pDst->aRegs[i].au32[3] = 0;
8724 }
8725
8726 /* FPU IP, CS, DP and DS. */
8727 /** @todo AMD says this is only done if FSW.ES is set after loading. */
8728 if (enmEffOpSize == IEMMODE_64BIT)
8729 {
8730 pDst->FPUIP = pSrc->FPUIP;
8731 pDst->CS = pSrc->CS;
8732 pDst->Rsrvd1 = pSrc->Rsrvd1;
8733 pDst->FPUDP = pSrc->FPUDP;
8734 pDst->DS = pSrc->DS;
8735 pDst->Rsrvd2 = pSrc->Rsrvd2;
8736 }
8737 else
8738 {
8739 pDst->FPUIP = pSrc->FPUIP;
8740 pDst->CS = pSrc->CS;
8741 pDst->Rsrvd1 = 0;
8742 pDst->FPUDP = pSrc->FPUDP;
8743 pDst->DS = pSrc->DS;
8744 pDst->Rsrvd2 = 0;
8745 }
8746
8747 /* XMM registers. Skipped in 64-bit CPL0 if EFER.FFXSR (AMD only) is set.
8748 * Does not affect MXCSR, only registers.
8749 */
8750 if ( !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_FFXSR)
8751 || !IEM_IS_64BIT_CODE(pVCpu)
8752 || IEM_GET_CPL(pVCpu) != 0)
8753 {
8754 uint32_t cXmmRegs = IEM_IS_64BIT_CODE(pVCpu) ? 16 : 8;
8755 for (uint32_t i = 0; i < cXmmRegs; i++)
8756 pDst->aXMM[i] = pSrc->aXMM[i];
8757 }
8758
8759 pDst->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
8760 iemFpuRecalcExceptionStatus(pDst);
8761
8762 if (pDst->FSW & X86_FSW_ES)
8763 Log11(("fxrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
8764 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
8765
8766 /*
8767 * Unmap the memory.
8768 */
8769 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
8770 if (rcStrict != VINF_SUCCESS)
8771 return rcStrict;
8772
8773 iemHlpUsedFpu(pVCpu);
8774 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8775}
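
/*
 * Illustration only (not used by the build): the MXCSR sanity rule applied above (and
 * again by LDMXCSR below) in isolation -- any bit set outside MXCSR_MASK makes the
 * load raise #GP(0).  A typical mask value is 0xffff (0xffbf on CPUs without DAZ), but
 * the guest mask is whatever CPUMGetGuestMxCsrMask reports.  Minimal sketch:
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      static bool mxcsrLoadIsValidExample(uint32_t fNewMxCsr, uint32_t fMxCsrMask)
 *      {
 *          return !(fNewMxCsr & ~fMxCsrMask);      // false => raise #GP(0)
 *      }
 */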
8776
8777
8778/**
8779 * Implements 'XSAVE'.
8780 *
8781 * @param iEffSeg The effective segment.
8782 * @param GCPtrEff The address of the image.
8783 * @param enmEffOpSize The operand size (only REX.W really matters).
8784 */
8785IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8786{
8787 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8788
8789 /*
8790 * Raise exceptions.
8791 */
8792 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8793 return iemRaiseUndefinedOpcode(pVCpu);
8794 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8795 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8796 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS)))
8797 { /* likely */ }
8798 else
8799 {
8800        Log(("xsave: Not enabled for nested-guest execution -> #UD\n"));
8801 return iemRaiseUndefinedOpcode(pVCpu);
8802 }
8803 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8804 return iemRaiseDeviceNotAvailable(pVCpu);
8805
8806 /*
8807 * Calc the requested mask.
8808 */
8809 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
8810 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
8811 uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
8812
8813/** @todo figure out the exact protocol for the memory access. Currently we
8814 *        just need this crap to work halfway to make it possible to test
8815 * AVX instructions. */
8816/** @todo figure out the XINUSE and XMODIFIED */
8817
8818 /*
8819 * Access the x87 memory state.
8820 */
8821 /* The x87+SSE state. */
8822 uint8_t bUnmapInfoMem512;
8823 void *pvMem512;
8824 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfoMem512, 512,
8825 iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
8826 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8827 if (rcStrict != VINF_SUCCESS)
8828 return rcStrict;
8829 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
8830 PCX86FXSTATE pSrc = &pVCpu->cpum.GstCtx.XState.x87;
8831
8832 /* The header. */
8833 uint8_t bUnmapInfoHdr;
8834 PX86XSAVEHDR pHdr;
8835 rcStrict = iemMemMap(pVCpu, (void **)&pHdr, &bUnmapInfoHdr, sizeof(pHdr),
8836 iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW, 0 /* checked above */);
8837 if (rcStrict != VINF_SUCCESS)
8838 return rcStrict;
8839
8840 /*
8841 * Store the X87 state.
8842 */
8843 if (fReqComponents & XSAVE_C_X87)
8844 {
8845 /* common for all formats */
8846 pDst->FCW = pSrc->FCW;
8847 pDst->FSW = pSrc->FSW;
8848 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
8849 pDst->FOP = pSrc->FOP;
8850 pDst->FPUIP = pSrc->FPUIP;
8851 pDst->CS = pSrc->CS;
8852 pDst->FPUDP = pSrc->FPUDP;
8853 pDst->DS = pSrc->DS;
8854 if (enmEffOpSize == IEMMODE_64BIT)
8855 {
8856 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
8857 pDst->Rsrvd1 = pSrc->Rsrvd1;
8858 pDst->Rsrvd2 = pSrc->Rsrvd2;
8859 }
8860 else
8861 {
8862 pDst->Rsrvd1 = 0;
8863 pDst->Rsrvd2 = 0;
8864 }
8865 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
8866 {
8867 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
8868 * them for now... */
8869 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
8870 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
8871 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
8872 pDst->aRegs[i].au32[3] = 0;
8873 }
8874
8875 }
8876
8877 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
8878 {
8879 pDst->MXCSR = pSrc->MXCSR;
8880 pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
8881 }
8882
8883 if (fReqComponents & XSAVE_C_SSE)
8884 {
8885 /* XMM registers. */
8886 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8887 for (uint32_t i = 0; i < cXmmRegs; i++)
8888 pDst->aXMM[i] = pSrc->aXMM[i];
8889 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
8890 * right? */
8891 }
8892
8893 /* Commit the x87 state bits. (probably wrong) */
8894 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoMem512);
8895 if (rcStrict != VINF_SUCCESS)
8896 return rcStrict;
8897
8898 /*
8899 * Store AVX state.
8900 */
8901 if (fReqComponents & XSAVE_C_YMM)
8902 {
8903 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
8904 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
8905 uint8_t bUnmapInfoComp;
8906 PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
8907 PX86XSAVEYMMHI pCompDst;
8908 rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, &bUnmapInfoComp, sizeof(*pCompDst), iEffSeg,
8909 GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
8910 IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 0 /* checked above */);
8911 if (rcStrict != VINF_SUCCESS)
8912 return rcStrict;
8913
8914 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
8915 for (uint32_t i = 0; i < cXmmRegs; i++)
8916 pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
8917
8918 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoComp);
8919 if (rcStrict != VINF_SUCCESS)
8920 return rcStrict;
8921 }
8922
8923 /*
8924 * Update the header.
8925 */
8926 pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
8927 | (fReqComponents & fXInUse);
8928
8929 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoHdr);
8930 if (rcStrict != VINF_SUCCESS)
8931 return rcStrict;
8932
8933 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
8934}
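
/*
 * Illustration only (not used by the build): the XSTATE_BV merge performed on the
 * header just above, in isolation.  Components that were not requested keep their old
 * bit; requested components get their bit from the in-use mask (approximated by XCR0
 * here, see the XINUSE todo above).  Minimal sketch:
 *
 *      #include <stdint.h>
 *
 *      static uint64_t xsaveHdrMergeExample(uint64_t bmOldXState, uint64_t fReqComponents, uint64_t fXInUse)
 *      {
 *          return (bmOldXState & ~fReqComponents) | (fReqComponents & fXInUse);
 *      }
 */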
8935
8936
8937/**
8938 * Implements 'XRSTOR'.
8939 *
8940 * @param iEffSeg The effective segment.
8941 * @param GCPtrEff The address of the image.
8942 * @param enmEffOpSize The operand size (only REX.W really matters).
8943 */
8944IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
8945{
8946 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
8947
8948 /*
8949 * Raise exceptions.
8950 */
8951 if (!(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
8952 return iemRaiseUndefinedOpcode(pVCpu);
8953 /* When in VMX non-root mode and XSAVE/XRSTOR is not enabled, it results in #UD. */
8954 if (RT_LIKELY( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
8955 || IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_XSAVES_XRSTORS)))
8956 { /* likely */ }
8957 else
8958 {
8959 Log(("xrstor: Not enabled for nested-guest execution -> #UD\n"));
8960 return iemRaiseUndefinedOpcode(pVCpu);
8961 }
8962 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS)
8963 return iemRaiseDeviceNotAvailable(pVCpu);
8964 if (GCPtrEff & 63)
8965 {
8966        /** @todo CPU/VM detection possible! \#AC might not be signalled for
8967         *        all/any misalignment sizes; Intel says it's an implementation detail. */
8968 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
8969 && pVCpu->cpum.GstCtx.eflags.Bits.u1AC
8970 && IEM_GET_CPL(pVCpu) == 3)
8971 return iemRaiseAlignmentCheckException(pVCpu);
8972 return iemRaiseGeneralProtectionFault0(pVCpu);
8973 }
8974
8975/** @todo figure out the exact protocol for the memory access. Currently we
8976 *        just need this crap to work halfway to make it possible to test
8977 * AVX instructions. */
8978/** @todo figure out the XINUSE and XMODIFIED */
8979
8980 /*
8981 * Access the x87 memory state.
8982 */
8983 /* The x87+SSE state. */
8984 uint8_t bUnmapInfoMem512;
8985 void *pvMem512;
8986 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, &bUnmapInfoMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R,
8987 63 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC);
8988 if (rcStrict != VINF_SUCCESS)
8989 return rcStrict;
8990 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
8991 PX86FXSTATE pDst = &pVCpu->cpum.GstCtx.XState.x87;
8992
8993 /*
8994 * Calc the requested mask
8995 */
8996 uint8_t bUnmapInfoHdr;
8997 PX86XSAVEHDR pHdrDst = &pVCpu->cpum.GstCtx.XState.Hdr;
8998 PCX86XSAVEHDR pHdrSrc;
8999 rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, &bUnmapInfoHdr, sizeof(*pHdrSrc), iEffSeg, GCPtrEff + 512,
9000 IEM_ACCESS_DATA_R, 0 /* checked above */);
9001 if (rcStrict != VINF_SUCCESS)
9002 return rcStrict;
9003
9004 uint64_t const fReqComponents = RT_MAKE_U64(pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.edx) & pVCpu->cpum.GstCtx.aXcr[0];
9005 AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9006 //uint64_t const fXInUse = pVCpu->cpum.GstCtx.aXcr[0];
9007 uint64_t const fRstorMask = pHdrSrc->bmXState;
9008 uint64_t const fCompMask = pHdrSrc->bmXComp;
9009
9010 AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9011
9012 uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
9013
9014 /* We won't need this any longer. */
9015 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoHdr);
9016 if (rcStrict != VINF_SUCCESS)
9017 return rcStrict;
9018
9019 /*
9020 * Load the X87 state.
9021 */
9022 if (fReqComponents & XSAVE_C_X87)
9023 {
9024 if (fRstorMask & XSAVE_C_X87)
9025 {
9026 pDst->FCW = pSrc->FCW;
9027 pDst->FSW = pSrc->FSW;
9028 pDst->FTW = pSrc->FTW & UINT16_C(0xff);
9029 pDst->FOP = pSrc->FOP;
9030 pDst->FPUIP = pSrc->FPUIP;
9031 pDst->CS = pSrc->CS;
9032 pDst->FPUDP = pSrc->FPUDP;
9033 pDst->DS = pSrc->DS;
9034 if (enmEffOpSize == IEMMODE_64BIT)
9035 {
9036 /* Load upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
9037 pDst->Rsrvd1 = pSrc->Rsrvd1;
9038 pDst->Rsrvd2 = pSrc->Rsrvd2;
9039 }
9040 else
9041 {
9042 pDst->Rsrvd1 = 0;
9043 pDst->Rsrvd2 = 0;
9044 }
9045 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
9046 {
9047 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
9048 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
9049 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
9050 pDst->aRegs[i].au32[3] = 0;
9051 }
9052
9053 pDst->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9054 iemFpuRecalcExceptionStatus(pDst);
9055
9056 if (pDst->FSW & X86_FSW_ES)
9057 Log11(("xrstor: %04x:%08RX64: loading state with pending FPU exception (FSW=%#x)\n",
9058 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pSrc->FSW));
9059 }
9060 else
9061 {
9062 pDst->FCW = 0x37f;
9063 pDst->FSW = 0;
9064 pDst->FTW = 0x00; /* 0 - empty. */
9065 pDst->FPUDP = 0;
9066 pDst->DS = 0; //??
9067 pDst->Rsrvd2= 0;
9068 pDst->FPUIP = 0;
9069 pDst->CS = 0; //??
9070 pDst->Rsrvd1= 0;
9071 pDst->FOP = 0;
9072 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
9073 {
9074 pDst->aRegs[i].au32[0] = 0;
9075 pDst->aRegs[i].au32[1] = 0;
9076 pDst->aRegs[i].au32[2] = 0;
9077 pDst->aRegs[i].au32[3] = 0;
9078 }
9079 }
9080 pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
9081 }
9082
9083 /* MXCSR */
9084 if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
9085 {
9086 if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
9087 pDst->MXCSR = pSrc->MXCSR;
9088 else
9089 pDst->MXCSR = 0x1f80;
9090 }
9091
9092 /* XMM registers. */
9093 if (fReqComponents & XSAVE_C_SSE)
9094 {
9095 if (fRstorMask & XSAVE_C_SSE)
9096 {
9097 for (uint32_t i = 0; i < cXmmRegs; i++)
9098 pDst->aXMM[i] = pSrc->aXMM[i];
9099 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
9100 * right? */
9101 }
9102 else
9103 {
9104 for (uint32_t i = 0; i < cXmmRegs; i++)
9105 {
9106 pDst->aXMM[i].au64[0] = 0;
9107 pDst->aXMM[i].au64[1] = 0;
9108 }
9109 }
9110 pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
9111 }
9112
9113    /* Unmap the x87 state bits (so we don't run out of mappings). */
9114 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoMem512);
9115 if (rcStrict != VINF_SUCCESS)
9116 return rcStrict;
9117
9118 /*
9119 * Restore AVX state.
9120 */
9121 if (fReqComponents & XSAVE_C_YMM)
9122 {
9123 AssertLogRelReturn(pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
9124 PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(IEM_GET_CTX(pVCpu), XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
9125
9126 if (fRstorMask & XSAVE_C_YMM)
9127 {
9128 /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
9129 uint8_t bUnmapInfoComp;
9130 PCX86XSAVEYMMHI pCompSrc;
9131 rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, &bUnmapInfoComp, sizeof(*pCompDst),
9132 iEffSeg, GCPtrEff + pVCpu->cpum.GstCtx.aoffXState[XSAVE_C_YMM_BIT],
9133 IEM_ACCESS_DATA_R, 0 /* checked above */);
9134 if (rcStrict != VINF_SUCCESS)
9135 return rcStrict;
9136
9137 for (uint32_t i = 0; i < cXmmRegs; i++)
9138 {
9139 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
9140 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
9141 }
9142
9143 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoComp);
9144 if (rcStrict != VINF_SUCCESS)
9145 return rcStrict;
9146 }
9147 else
9148 {
9149 for (uint32_t i = 0; i < cXmmRegs; i++)
9150 {
9151 pCompDst->aYmmHi[i].au64[0] = 0;
9152 pCompDst->aYmmHi[i].au64[1] = 0;
9153 }
9154 }
9155 pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
9156 }
9157
9158 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9159}
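
/*
 * Illustration only (not used by the build): the per-component decision XRSTOR makes
 * above, reduced to a single requested component.  If the image's XSTATE_BV bit is set
 * the component is loaded from memory, otherwise it is put into its init state; an
 * unrequested component is left untouched.  Minimal sketch with hypothetical callbacks:
 *
 *      #include <stdint.h>
 *
 *      static void xrstorComponentExample(uint64_t fReqComponents, uint64_t bmXStateFromImage,
 *                                         uint64_t fComponentBit,
 *                                         void (*pfnLoadFromImage)(void *pvUser),
 *                                         void (*pfnInitComponent)(void *pvUser),
 *                                         void *pvUser)
 *      {
 *          if (fReqComponents & fComponentBit)
 *          {
 *              if (bmXStateFromImage & fComponentBit)
 *                  pfnLoadFromImage(pvUser);       // component present in the image
 *              else
 *                  pfnInitComponent(pvUser);       // component marked as being in its init state
 *          }
 *      }
 */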
9160
9161
9162
9163
9164/**
9165 * Implements 'STMXCSR'.
9166 *
9167 * @param iEffSeg The effective segment register for @a GCPtrEff.
9168 * @param GCPtrEff The address of the image.
9169 */
9170IEM_CIMPL_DEF_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9171{
9172 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9173
9174 /*
9175 * Raise exceptions.
9176 */
9177 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9178 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9179 {
9180 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9181 {
9182 /*
9183 * Do the job.
9184 */
9185 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9186 if (rcStrict == VINF_SUCCESS)
9187 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9188 return rcStrict;
9189 }
9190 return iemRaiseDeviceNotAvailable(pVCpu);
9191 }
9192 return iemRaiseUndefinedOpcode(pVCpu);
9193}
9194
9195
9196/**
9197 * Implements 'VSTMXCSR'.
9198 *
9199 * @param iEffSeg The effective segment register for @a GCPtrEff.
9200 * @param GCPtrEff The address of the image.
9201 */
9202IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9203{
9204 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_XCRx);
9205
9206 /*
9207 * Raise exceptions.
9208 */
9209 if ( ( !IEM_IS_GUEST_CPU_AMD(pVCpu)
9210 ? (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
9211 : !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
9212 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE))
9213 {
9214 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9215 {
9216 /*
9217 * Do the job.
9218 */
9219 VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pVCpu->cpum.GstCtx.XState.x87.MXCSR);
9220 if (rcStrict == VINF_SUCCESS)
9221 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9222 return rcStrict;
9223 }
9224 return iemRaiseDeviceNotAvailable(pVCpu);
9225 }
9226 return iemRaiseUndefinedOpcode(pVCpu);
9227}
9228
9229
9230/**
9231 * Implements 'LDMXCSR'.
9232 *
9233 * @param iEffSeg The effective segment register for @a GCPtrEff.
9234 * @param GCPtrEff The address of the image.
9235 */
9236IEM_CIMPL_DEF_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
9237{
9238 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX);
9239
9240 /*
9241 * Raise exceptions.
9242 */
9243 /** @todo testcase - order of LDMXCSR faults. Does \#PF, \#GP and \#SS
9244 * happen after or before \#UD and \#EM? */
9245 if ( !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM)
9246 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR))
9247 {
9248 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS))
9249 {
9250 /*
9251 * Do the job.
9252 */
9253 uint32_t fNewMxCsr;
9254 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &fNewMxCsr, iEffSeg, GCPtrEff);
9255 if (rcStrict == VINF_SUCCESS)
9256 {
9257 uint32_t const fMxCsrMask = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
9258 if (!(fNewMxCsr & ~fMxCsrMask))
9259 {
9260 pVCpu->cpum.GstCtx.XState.x87.MXCSR = fNewMxCsr;
9261 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9262 }
9263 Log(("ldmxcsr: New MXCSR=%#RX32 & ~MASK=%#RX32 = %#RX32 -> #GP(0)\n",
9264 fNewMxCsr, fMxCsrMask, fNewMxCsr & ~fMxCsrMask));
9265 return iemRaiseGeneralProtectionFault0(pVCpu);
9266 }
9267 return rcStrict;
9268 }
9269 return iemRaiseDeviceNotAvailable(pVCpu);
9270 }
9271 return iemRaiseUndefinedOpcode(pVCpu);
9272}
9273
9274
9275/**
9276 * Common routine for fnstenv and fnsave.
9277 *
9278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9279 * @param enmEffOpSize The effective operand size.
9280 * @param uPtr Where to store the state.
9281 */
9282static void iemCImplCommonFpuStoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr)
9283{
9284 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9285 PCX86FXSTATE pSrcX87 = &pVCpu->cpum.GstCtx.XState.x87;
9286 if (enmEffOpSize == IEMMODE_16BIT)
9287 {
9288 uPtr.pu16[0] = pSrcX87->FCW;
9289 uPtr.pu16[1] = pSrcX87->FSW;
9290 uPtr.pu16[2] = iemFpuCalcFullFtw(pSrcX87);
9291 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9292 {
9293 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
9294 * protected mode or long mode and we save it in real mode? And vice
9295 * versa? And with 32-bit operand size? I think CPU is storing the
9296         *        versa? And with 32-bit operand size? I think the CPU is storing the
9297 * doing any address calculations here. */
9298 uPtr.pu16[3] = (uint16_t)pSrcX87->FPUIP;
9299 uPtr.pu16[4] = ((pSrcX87->FPUIP >> 4) & UINT16_C(0xf000)) | pSrcX87->FOP;
9300 uPtr.pu16[5] = (uint16_t)pSrcX87->FPUDP;
9301 uPtr.pu16[6] = (pSrcX87->FPUDP >> 4) & UINT16_C(0xf000);
9302 }
9303 else
9304 {
9305 uPtr.pu16[3] = pSrcX87->FPUIP;
9306 uPtr.pu16[4] = pSrcX87->CS;
9307 uPtr.pu16[5] = pSrcX87->FPUDP;
9308 uPtr.pu16[6] = pSrcX87->DS;
9309 }
9310 }
9311 else
9312 {
9313 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
9314 uPtr.pu16[0*2] = pSrcX87->FCW;
9315 uPtr.pu16[0*2+1] = 0xffff; /* (0xffff observed on intel skylake.) */
9316 uPtr.pu16[1*2] = pSrcX87->FSW;
9317 uPtr.pu16[1*2+1] = 0xffff;
9318 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pSrcX87);
9319 uPtr.pu16[2*2+1] = 0xffff;
9320 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9321 {
9322 uPtr.pu16[3*2] = (uint16_t)pSrcX87->FPUIP;
9323 uPtr.pu32[4] = ((pSrcX87->FPUIP & UINT32_C(0xffff0000)) >> 4) | pSrcX87->FOP;
9324 uPtr.pu16[5*2] = (uint16_t)pSrcX87->FPUDP;
9325 uPtr.pu32[6] = (pSrcX87->FPUDP & UINT32_C(0xffff0000)) >> 4;
9326 }
9327 else
9328 {
9329 uPtr.pu32[3] = pSrcX87->FPUIP;
9330 uPtr.pu16[4*2] = pSrcX87->CS;
9331 uPtr.pu16[4*2+1] = pSrcX87->FOP;
9332 uPtr.pu32[5] = pSrcX87->FPUDP;
9333 uPtr.pu16[6*2] = pSrcX87->DS;
9334 uPtr.pu16[6*2+1] = 0xffff;
9335 }
9336 }
9337}
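
/*
 * Illustration only (not used by the build): the 16-bit real/V86-mode packing used
 * above, where the 20-bit linear FPU instruction pointer is split into word 3 (bits
 * 15:0) and the top nibble of word 4, with the 11-bit opcode in the low bits of
 * word 4.  Minimal sketch:
 *
 *      #include <stdint.h>
 *
 *      static void fstenvRealMode16PackIpExample(uint32_t uFpuIpLinear, uint16_t uFop11,
 *                                                uint16_t *puWord3, uint16_t *puWord4)
 *      {
 *          *puWord3 = (uint16_t)uFpuIpLinear;                                          // IP bits 15:0
 *          *puWord4 = (uint16_t)(((uFpuIpLinear >> 4) & UINT16_C(0xf000)) | uFop11);   // IP bits 19:16 | FOP
 *      }
 */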
9338
9339
9340/**
9341 * Common routine for fldenv and frstor.
9342 *
9343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9344 * @param enmEffOpSize The effective operand size.
9345 * @param   uPtr            Where to load the state from.
9346 */
9347static void iemCImplCommonFpuRestoreEnv(PVMCPUCC pVCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr)
9348{
9349 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9350 PX86FXSTATE pDstX87 = &pVCpu->cpum.GstCtx.XState.x87;
9351 if (enmEffOpSize == IEMMODE_16BIT)
9352 {
9353 pDstX87->FCW = uPtr.pu16[0];
9354 pDstX87->FSW = uPtr.pu16[1];
9355 pDstX87->FTW = uPtr.pu16[2];
9356 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9357 {
9358 pDstX87->FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
9359 pDstX87->FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
9360 pDstX87->FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
9361 pDstX87->CS = 0;
9362 pDstX87->Rsrvd1= 0;
9363 pDstX87->DS = 0;
9364 pDstX87->Rsrvd2= 0;
9365 }
9366 else
9367 {
9368 pDstX87->FPUIP = uPtr.pu16[3];
9369 pDstX87->CS = uPtr.pu16[4];
9370 pDstX87->Rsrvd1= 0;
9371 pDstX87->FPUDP = uPtr.pu16[5];
9372 pDstX87->DS = uPtr.pu16[6];
9373 pDstX87->Rsrvd2= 0;
9374 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
9375 }
9376 }
9377 else
9378 {
9379 pDstX87->FCW = uPtr.pu16[0*2];
9380 pDstX87->FSW = uPtr.pu16[1*2];
9381 pDstX87->FTW = uPtr.pu16[2*2];
9382 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
9383 {
9384 pDstX87->FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
9385 pDstX87->FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
9386 pDstX87->FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
9387 pDstX87->CS = 0;
9388 pDstX87->Rsrvd1= 0;
9389 pDstX87->DS = 0;
9390 pDstX87->Rsrvd2= 0;
9391 }
9392 else
9393 {
9394 pDstX87->FPUIP = uPtr.pu32[3];
9395 pDstX87->CS = uPtr.pu16[4*2];
9396 pDstX87->Rsrvd1= 0;
9397 pDstX87->FOP = uPtr.pu16[4*2+1];
9398 pDstX87->FPUDP = uPtr.pu32[5];
9399 pDstX87->DS = uPtr.pu16[6*2];
9400 pDstX87->Rsrvd2= 0;
9401 }
9402 }
9403
9404 /* Make adjustments. */
9405 pDstX87->FTW = iemFpuCompressFtw(pDstX87->FTW);
9406#ifdef LOG_ENABLED
9407 uint16_t const fOldFsw = pDstX87->FSW;
9408#endif
9409 pDstX87->FCW &= ~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK; /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9410 iemFpuRecalcExceptionStatus(pDstX87);
9411#ifdef LOG_ENABLED
9412 if ((pDstX87->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9413 Log11(("iemCImplCommonFpuRestoreEnv: %04x:%08RX64: %s FPU exception (FCW=%#x FSW=%#x -> %#x)\n",
9414               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised",
9415 pDstX87->FCW, fOldFsw, pDstX87->FSW));
9416#endif
9417
9418 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
9419 * exceptions are pending after loading the saved state? */
9420}
9421
9422
9423/**
9424 * Implements 'FNSTENV'.
9425 *
9426 * @param enmEffOpSize The operand size (only REX.W really matters).
9427 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9428 * @param GCPtrEffDst The address of the image.
9429 */
9430IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9431{
9432 uint8_t bUnmapInfo;
9433 RTPTRUNION uPtr;
9434 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9435 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE,
9436 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ? */);
9437 if (rcStrict != VINF_SUCCESS)
9438 return rcStrict;
9439
9440 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9441
9442 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
9443 if (rcStrict != VINF_SUCCESS)
9444 return rcStrict;
9445
9446 /* Mask all math exceptions. Any possibly pending exceptions will be cleared. */
9447 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9448 pFpuCtx->FCW |= X86_FCW_XCPT_MASK;
9449#ifdef LOG_ENABLED
9450 uint16_t fOldFsw = pFpuCtx->FSW;
9451#endif
9452 iemFpuRecalcExceptionStatus(pFpuCtx);
9453#ifdef LOG_ENABLED
9454 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9455 Log11(("fnstenv: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9456                fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9457#endif
9458
9459 iemHlpUsedFpu(pVCpu);
9460
9461 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9462 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9463}
9464
9465
9466/**
9467 * Implements 'FNSAVE'.
9468 *
9469 * @param enmEffOpSize The operand size.
9470 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
9471 * @param GCPtrEffDst The address of the image.
9472 */
9473IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
9474{
9475 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9476
9477 uint8_t bUnmapInfo;
9478 RTPTRUNION uPtr;
9479 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9480 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE, 3 /** @todo ? */);
9481 if (rcStrict != VINF_SUCCESS)
9482 return rcStrict;
9483
9484 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9485 iemCImplCommonFpuStoreEnv(pVCpu, enmEffOpSize, uPtr);
9486 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9487 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9488 {
9489 paRegs[i].au32[0] = pFpuCtx->aRegs[i].au32[0];
9490 paRegs[i].au32[1] = pFpuCtx->aRegs[i].au32[1];
9491 paRegs[i].au16[4] = pFpuCtx->aRegs[i].au16[4];
9492 }
9493
9494 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
9495 if (rcStrict != VINF_SUCCESS)
9496 return rcStrict;
9497
9498 /* Rotate the stack to account for changed TOS. */
9499 iemFpuRotateStackSetTop(pFpuCtx, 0);
9500
9501 /*
9502 * Re-initialize the FPU context.
9503 */
9504 pFpuCtx->FCW = 0x37f;
9505 pFpuCtx->FSW = 0;
9506 pFpuCtx->FTW = 0x00; /* 0 - empty */
9507 pFpuCtx->FPUDP = 0;
9508 pFpuCtx->DS = 0;
9509 pFpuCtx->Rsrvd2= 0;
9510 pFpuCtx->FPUIP = 0;
9511 pFpuCtx->CS = 0;
9512 pFpuCtx->Rsrvd1= 0;
9513 pFpuCtx->FOP = 0;
9514
9515 iemHlpUsedFpu(pVCpu);
9516 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9517}
9518
9519
9520
9521/**
9522 * Implements 'FLDENV'.
9523 *
9524 * @param enmEffOpSize The operand size (only REX.W really matters).
9525 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9526 * @param GCPtrEffSrc The address of the image.
9527 */
9528IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9529{
9530 uint8_t bUnmapInfo;
9531 RTCPTRUNION uPtr;
9532 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
9533 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R,
9534 enmEffOpSize == IEMMODE_16BIT ? 1 : 3 /** @todo ?*/);
9535 if (rcStrict != VINF_SUCCESS)
9536 return rcStrict;
9537
9538 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9539
9540 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
9541 if (rcStrict != VINF_SUCCESS)
9542 return rcStrict;
9543
9544 iemHlpUsedFpu(pVCpu);
9545 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9546}
9547
9548
9549/**
9550 * Implements 'FRSTOR'.
9551 *
9552 * @param enmEffOpSize The operand size.
9553 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
9554 * @param GCPtrEffSrc The address of the image.
9555 */
9556IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
9557{
9558 uint8_t bUnmapInfo;
9559 RTCPTRUNION uPtr;
9560 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&uPtr.pv, &bUnmapInfo, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
9561 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R, 3 /** @todo ?*/ );
9562 if (rcStrict != VINF_SUCCESS)
9563 return rcStrict;
9564
9565 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9566 iemCImplCommonFpuRestoreEnv(pVCpu, enmEffOpSize, uPtr);
9567 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
9568 for (uint32_t i = 0; i < RT_ELEMENTS(pFpuCtx->aRegs); i++)
9569 {
9570 pFpuCtx->aRegs[i].au32[0] = paRegs[i].au32[0];
9571 pFpuCtx->aRegs[i].au32[1] = paRegs[i].au32[1];
9572 pFpuCtx->aRegs[i].au32[2] = paRegs[i].au16[4];
9573 pFpuCtx->aRegs[i].au32[3] = 0;
9574 }
9575
9576 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
9577 if (rcStrict != VINF_SUCCESS)
9578 return rcStrict;
9579
9580 iemHlpUsedFpu(pVCpu);
9581 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9582}
9583
9584
9585/**
9586 * Implements 'FLDCW'.
9587 *
9588 * @param u16Fcw The new FCW.
9589 */
9590IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
9591{
9592 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9593
9594 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
9595 /** @todo Testcase: Try see what happens when trying to set undefined bits
9596 * (other than 6 and 7). Currently ignoring them. */
9597    /** @todo Testcase: Test that it raises and lowers the FPU exception bits
9598 * according to FSW. (This is what is currently implemented.) */
9599 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9600 pFpuCtx->FCW = u16Fcw & (~X86_FCW_ZERO_MASK | X86_FCW_IC_MASK); /* Intel 10980xe allows setting the IC bit. Win 3.11 CALC.EXE sets it. */
9601#ifdef LOG_ENABLED
9602 uint16_t fOldFsw = pFpuCtx->FSW;
9603#endif
9604 iemFpuRecalcExceptionStatus(pFpuCtx);
9605#ifdef LOG_ENABLED
9606 if ((pFpuCtx->FSW & X86_FSW_ES) ^ (fOldFsw & X86_FSW_ES))
9607 Log11(("fldcw: %04x:%08RX64: %s FPU exception (FCW=%#x, FSW %#x -> %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
9608                fOldFsw & X86_FSW_ES ? "Suppressed" : "Raised", pFpuCtx->FCW, fOldFsw, pFpuCtx->FSW));
9609#endif
9610
9611 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
9612 iemHlpUsedFpu(pVCpu);
9613 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9614}
9615
9616
9617
9618/**
9619 * Implements the underflow case of fxch.
9620 *
9621 * @param iStReg The other stack register.
9622 * @param uFpuOpcode The FPU opcode (for simplicity).
9623 */
9624IEM_CIMPL_DEF_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode)
9625{
9626 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9627
9628 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9629 unsigned const iReg1 = X86_FSW_TOP_GET(pFpuCtx->FSW);
9630 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9631 Assert(!(RT_BIT(iReg1) & pFpuCtx->FTW) || !(RT_BIT(iReg2) & pFpuCtx->FTW));
9632
9633 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
9634 * registers are read as QNaN and then exchanged. This could be
9635 * wrong... */
9636 if (pFpuCtx->FCW & X86_FCW_IM)
9637 {
9638 if (RT_BIT(iReg1) & pFpuCtx->FTW)
9639 {
9640 if (RT_BIT(iReg2) & pFpuCtx->FTW)
9641 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9642 else
9643 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[iStReg].r80;
9644 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
9645 }
9646 else
9647 {
9648 pFpuCtx->aRegs[iStReg].r80 = pFpuCtx->aRegs[0].r80;
9649 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
9650 }
9651 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
9652 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
9653 }
9654 else
9655 {
9656 /* raise underflow exception, don't change anything. */
9657 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
9658 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9659 Log11(("fxch: %04x:%08RX64: Underflow exception (FSW=%#x)\n",
9660 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9661 }
9662
9663 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, uFpuOpcode);
9664 iemHlpUsedFpu(pVCpu);
9665 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9666}
9667
9668
9669/**
9670 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
9671 *
9672 * @param iStReg The other stack register.
9673 * @param fUCmp true for FUCOMI[P], false for FCOMI[P].
9674 * @param uPopAndFpuOpcode Bits 15-0: The FPU opcode.
9675 * Bit 31: Whether we should pop the stack when
9676 * done or not.
9677 */
9678IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode)
9679{
9680 Assert(iStReg < 8);
9681 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_X87);
9682
9683 /*
9684 * Raise exceptions.
9685 */
9686 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS))
9687 return iemRaiseDeviceNotAvailable(pVCpu);
9688
9689 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9690 uint16_t u16Fsw = pFpuCtx->FSW;
9691 if (u16Fsw & X86_FSW_ES)
9692 return iemRaiseMathFault(pVCpu);
9693
9694 /*
9695 * Check if any of the register accesses causes #SF + #IA.
9696 */
9697 bool fPop = RT_BOOL(uPopAndFpuOpcode & RT_BIT_32(31));
9698 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
9699 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
9700 if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
9701 {
9702 uint32_t u32Eflags;
9703 if (!fUCmp)
9704 u32Eflags = iemAImpl_fcomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
9705 else
9706 u32Eflags = iemAImpl_fucomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
9707
9708 pFpuCtx->FSW &= ~X86_FSW_C1;
9709 pFpuCtx->FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
9710 if ( !(u16Fsw & X86_FSW_IE)
9711 || (pFpuCtx->FCW & X86_FCW_IM) )
9712 {
9713 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9714 pVCpu->cpum.GstCtx.eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9715 }
9716 }
9717 else if (pFpuCtx->FCW & X86_FCW_IM)
9718 {
9719 /* Masked underflow. */
9720 pFpuCtx->FSW &= ~X86_FSW_C1;
9721 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
9722 pVCpu->cpum.GstCtx.eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
9723 pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
9724 }
9725 else
9726 {
9727 /* Raise underflow - don't touch EFLAGS or TOP. */
9728 pFpuCtx->FSW &= ~X86_FSW_C1;
9729 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
9730        Log11(("fcomi/fucomi: %04x:%08RX64: Raising IE+SF exception (FSW=%#x)\n",
9731 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
9732 fPop = false;
9733 }
9734
9735 /*
9736 * Pop if necessary.
9737 */
9738 if (fPop)
9739 {
9740 pFpuCtx->FTW &= ~RT_BIT(iReg1);
9741 iemFpuStackIncTop(pVCpu);
9742 }
9743
9744 iemFpuUpdateOpcodeAndIpWorkerEx(pVCpu, pFpuCtx, (uint16_t)uPopAndFpuOpcode);
9745 iemHlpUsedFpu(pVCpu);
9746 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9747}
9748
9749
9750/**
9751 * Implements 'RDSEED'.
9752 *
9753 * @returns Strict VBox status code.
9754 * @param iReg The index of the destination general-purpose register.
9755 * @param enmEffOpSize The operand size.
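 *
 * @note  Architecturally RDSEED reports success via EFLAGS.CF (set when a
 *        value was returned) and clears OF/SF/ZF/AF/PF; the worker routines
 *        invoked below are expected to follow that convention.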
9756 */
9757IEM_CIMPL_DEF_2(iemCImpl_rdseed, uint8_t, iReg, IEMMODE, enmEffOpSize)
9758{
9759#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9760 /* Nested-guest VMX intercept. */
9761 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
9762 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDSEED_EXIT))
9763 { /* probable */ }
9764 else
9765 {
9766 Log(("rdseed: Guest intercept -> VM-exit\n"));
9767 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_RDSEED, VMXINSTRID_RDSEED, cbInstr);
9768 }
9769#endif
9770
9771 uint32_t *pEFlags = &pVCpu->cpum.GstCtx.eflags.uBoth;
9772 switch (enmEffOpSize)
9773 {
9774 case IEMMODE_16BIT:
9775 {
9776 PFNIEMAIMPLRDRANDSEEDU16 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdSeed,
9777 &iemAImpl_rdseed_u16,
9778 &iemAImpl_rdseed_u16_fallback);
9779 uint16_t *pu16Dst = iemGRegRefU16(pVCpu, iReg);
9780 (pfnImpl)(pu16Dst, pEFlags);
9781 break;
9782 }
9783 case IEMMODE_32BIT:
9784 {
9785 PFNIEMAIMPLRDRANDSEEDU32 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdSeed,
9786 &iemAImpl_rdseed_u32,
9787 &iemAImpl_rdseed_u32_fallback);
9788 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, iReg);
9789 (pfnImpl)(pu32Dst, pEFlags);
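            /* The explicit store ensures the upper half of the 64-bit register
               is cleared (the architectural zero-extension of 32-bit results);
               writing through the 32-bit reference only covers the low dword. */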
9790 iemGRegStoreU32(pVCpu, iReg, *pu32Dst);
9791 break;
9792 }
9793 case IEMMODE_64BIT:
9794 {
9795 PFNIEMAIMPLRDRANDSEEDU64 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdSeed,
9796 &iemAImpl_rdseed_u64,
9797 &iemAImpl_rdseed_u64_fallback);
9798 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, iReg);
9799 (pfnImpl)(pu64Dst, pEFlags);
9800 break;
9801 }
9802 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9803 }
9804 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9805}
9806
9807
9808/**
9809 * Implements 'RDRAND'.
9810 *
9811 * @returns Strict VBox status code.
9812 * @param iReg The index of the destination general-purpose register.
9813 * @param enmEffOpSize The operand size.
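 *
 * @note  As with RDSEED, EFLAGS.CF signals whether a random value was
 *        returned and the other arithmetic flags are cleared; the workers
 *        below are expected to implement that convention.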
9814 */
9815IEM_CIMPL_DEF_2(iemCImpl_rdrand, uint8_t, iReg, IEMMODE, enmEffOpSize)
9816{
9817#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9818 /* Nested-guest VMX intercept. */
9819 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
9820 || !IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_RDRAND_EXIT))
9821 { /* probable */ }
9822 else
9823 {
9824 Log(("rdrand: Guest intercept -> VM-exit\n"));
9825 IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(pVCpu, VMX_EXIT_RDRAND, VMXINSTRID_RDRAND, cbInstr);
9826 }
9827#endif
9828
9829 uint32_t *pEFlags = &pVCpu->cpum.GstCtx.eflags.uBoth;
9830 switch (enmEffOpSize)
9831 {
9832 case IEMMODE_16BIT:
9833 {
9834 PFNIEMAIMPLRDRANDSEEDU16 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdRand, &iemAImpl_rdrand_u16,
9835 &iemAImpl_rdrand_u16_fallback);
9836 uint16_t *pu16Dst = iemGRegRefU16(pVCpu, iReg);
9837 (pfnImpl)(pu16Dst, pEFlags);
9838 break;
9839 }
9840 case IEMMODE_32BIT:
9841 {
9842 PFNIEMAIMPLRDRANDSEEDU32 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdRand, &iemAImpl_rdrand_u32,
9843 &iemAImpl_rdrand_u32_fallback);
9844 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, iReg);
9845 (pfnImpl)(pu32Dst, pEFlags);
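            /* Same as for RDSEED: the explicit store zero-extends the 32-bit
               result into the full 64-bit register. */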
9846 iemGRegStoreU32(pVCpu, iReg, *pu32Dst);
9847 break;
9848 }
9849 case IEMMODE_64BIT:
9850 {
9851 PFNIEMAIMPLRDRANDSEEDU64 pfnImpl = IEM_SELECT_HOST_OR_FALLBACK(fRdRand, &iemAImpl_rdrand_u64,
9852 &iemAImpl_rdrand_u64_fallback);
9853 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, iReg);
9854 (pfnImpl)(pu64Dst, pEFlags);
9855 break;
9856 }
9857 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9858 }
9859 return iemRegAddToRipAndFinishingClearingRF(pVCpu, cbInstr);
9860}
9861
9862/** @} */
9863